X86InstrAVX512.td revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX512] in {
  // 512-bit: every ordered pair of {v8f64, v16f32, v8i64, v16i32}.
  // NOTE: a redundant duplicate of the v8f64 <- v8i64 pattern has been
  // removed from the end of this group.
  def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v8i64 VR512:$src))),  (v8f64 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v8f64 VR512:$src))),  (v8i64 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))),  (v16i32 VR512:$src)>;

  // Bitcasts between 128-bit vector types held in EVEX-encodable registers.
  def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;

  // Bitcasts between 256-bit vector types. Return the original type since
  // no instruction is needed for the conversion.
  def : Pat<(v4f64  (bitconvert (v8f32 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))),  (v16i16 VR256X:$src)>;
}
82
83//
// AVX-512: The VPXOR instruction writes zero to its upper part, so it is safe to use it to build zeros.
85//
86
// Pseudo that materializes an all-zeros ZMM register. Marked rematerializable
// and as cheap as a move so the register allocator can freely recreate it.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX512] in
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
                        [(set VR512:$dst, (v16f32 immAllZerosV))]>;

// Reuse the same pseudo for the remaining 512-bit all-zeros value types.
let Predicates = [HasAVX512] in {
  def : Pat<(v8i64  immAllZerosV), (AVX512_512_SET0)>;
  def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
  def : Pat<(v8f64  immAllZerosV), (AVX512_512_SET0)>;
}
98
99//===----------------------------------------------------------------------===//
100// AVX-512 - VECTOR INSERT
101//
// -- 32x4 form --
// vinsertf32x4: insert a 128-bit FP subvector into a ZMM register.
// No patterns here; selection is done via the vinsert128_insert Pats below.
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF32x4rr : AVX512AIi8<0x18, MRMSrcReg, (outs VR512:$dst),
      (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
      "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512;
let mayLoad = 1 in
def VINSERTF32x4rm : AVX512AIi8<0x18, MRMSrcMem, (outs VR512:$dst),
      (ins VR512:$src1, f128mem:$src2, i8imm:$src3),
      "vinsertf32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
}

// vinsertf64x4: insert a 256-bit FP subvector into a ZMM register.
let hasSideEffects = 0, ExeDomain = SSEPackedDouble in {
def VINSERTF64x4rr : AVX512AIi8<0x1a, MRMSrcReg, (outs VR512:$dst),
      (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
      "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512, VEX_W;
let mayLoad = 1 in
def VINSERTF64x4rm : AVX512AIi8<0x1a, MRMSrcMem, (outs VR512:$dst),
      (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
      "vinsertf64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
}
// vinserti32x4: insert a 128-bit integer subvector into a ZMM register.
let hasSideEffects = 0 in {
def VINSERTI32x4rr : AVX512AIi8<0x38, MRMSrcReg, (outs VR512:$dst),
      (ins VR512:$src1, VR128X:$src2, i8imm:$src3),
      "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512;
let mayLoad = 1 in
def VINSERTI32x4rm : AVX512AIi8<0x38, MRMSrcMem, (outs VR512:$dst),
      (ins VR512:$src1, i128mem:$src2, i8imm:$src3),
      "vinserti32x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VT4>;
}

// vinserti64x4: insert a 256-bit integer subvector into a ZMM register.
let hasSideEffects = 0 in {
def VINSERTI64x4rr : AVX512AIi8<0x3a, MRMSrcReg, (outs VR512:$dst),
      (ins VR512:$src1, VR256X:$src2, i8imm:$src3),
      "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512, VEX_W;
let mayLoad = 1 in
def VINSERTI64x4rm : AVX512AIi8<0x3a, MRMSrcMem, (outs VR512:$dst),
      (ins VR512:$src1, i256mem:$src2, i8imm:$src3),
      "vinserti64x4\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      []>, EVEX_4V, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
}
153
// Select 128-bit subvector inserts. Register sources first, then loads.
// Note: the 64-bit element types also select the 32x4 instruction forms here.
def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1),
                                  (v4f32 VR128X:$src2), (iPTR imm)),
          (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1),
                                  (v2f64 VR128X:$src2), (iPTR imm)),
          (VINSERTF32x4rr VR512:$src1, VR128X:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1),
                                  (v2i64 VR128X:$src2), (iPTR imm)),
          (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
                                  (v4i32 VR128X:$src2), (iPTR imm)),
          (VINSERTI32x4rr VR512:$src1, VR128X:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;

// Fold a load of the inserted 128-bit subvector into the rm forms.
def : Pat<(vinsert128_insert:$ins (v16f32 VR512:$src1),
                                  (loadv4f32 addr:$src2), (iPTR imm)),
          (VINSERTF32x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i32 VR512:$src1),
                                  (bc_v4i32 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI32x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8f64 VR512:$src1),
                                  (loadv2f64 addr:$src2), (iPTR imm)),
          (VINSERTF32x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i64 VR512:$src1),
                                  (loadv2i64 addr:$src2), (iPTR imm)),
          (VINSERTI32x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert128_imm VR512:$ins))>;
180
// Select 256-bit subvector inserts from registers.
def : Pat<(vinsert256_insert:$ins (v16f32  VR512:$src1), (v8f32 VR256X:$src2),
           (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
                        (INSERT_get_vinsert256_imm VR512:$ins))>;
def : Pat<(vinsert256_insert:$ins (v8f64  VR512:$src1), (v4f64 VR256X:$src2),
           (iPTR imm)), (VINSERTF64x4rr VR512:$src1, VR256X:$src2,
                        (INSERT_get_vinsert256_imm VR512:$ins))>;
// The two integer patterns below previously matched vinsert128_insert even
// though they insert a 256-bit source and use the 256-bit immediate helper;
// they must match vinsert256_insert like their FP counterparts above.
def : Pat<(vinsert256_insert:$ins (v8i64  VR512:$src1), (v4i64 VR256X:$src2),
           (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
                        (INSERT_get_vinsert256_imm VR512:$ins))>;
def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1), (v8i32 VR256X:$src2),
           (iPTR imm)), (VINSERTI64x4rr VR512:$src1, VR256X:$src2,
                        (INSERT_get_vinsert256_imm VR512:$ins))>;
193
// Fold a load of the inserted 256-bit subvector into the rm forms.
def : Pat<(vinsert256_insert:$ins (v16f32 VR512:$src1),
                                  (loadv8f32 addr:$src2), (iPTR imm)),
          (VINSERTF64x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert256_imm VR512:$ins))>;
def : Pat<(vinsert256_insert:$ins (v8f64 VR512:$src1),
                                  (loadv4f64 addr:$src2), (iPTR imm)),
          (VINSERTF64x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert256_imm VR512:$ins))>;
def : Pat<(vinsert256_insert:$ins (v8i64 VR512:$src1),
                                  (loadv4i64 addr:$src2), (iPTR imm)),
          (VINSERTI64x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert256_imm VR512:$ins))>;
def : Pat<(vinsert256_insert:$ins (v16i32 VR512:$src1),
                                  (bc_v8i32 (loadv4i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI64x4rm VR512:$src1, addr:$src2,
                          (INSERT_get_vinsert256_imm VR512:$ins))>;

// vinsertps - insert f32 to XMM (EVEX-encoded form).
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
      (ins VR128X:$src1, VR128X:$src2, u32u8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
      EVEX_4V;
def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
      (ins VR128X:$src1, f32mem:$src2, u32u8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1,
                          (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                          imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
220
221//===----------------------------------------------------------------------===//
222// AVX-512 VECTOR EXTRACT
223//---
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
// -- 32x4 form --
def VEXTRACTF32x4rr : AVX512AIi8<0x19, MRMDestReg, (outs VR128X:$dst),
          (ins VR512:$src1, i8imm:$src2),
          "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512;
// The store form has no pattern, so inside this hasSideEffects = 0 scope it
// must carry mayStore = 1 (as the 64x4 form below already does); otherwise
// the instruction would be modeled as having no effect at all.
let mayStore = 1 in
def VEXTRACTF32x4mr : AVX512AIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR512:$src1, i8imm:$src2),
          "vextractf32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;

// -- 64x4 form --
def VEXTRACTF64x4rr : AVX512AIi8<0x1b, MRMDestReg, (outs VR256X:$dst),
          (ins VR512:$src1, i8imm:$src2),
          "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512, VEX_W;
let mayStore = 1 in
def VEXTRACTF64x4mr : AVX512AIi8<0x1b, MRMDestMem, (outs),
          (ins f256mem:$dst, VR512:$src1, i8imm:$src2),
          "vextractf64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
}
246
let hasSideEffects = 0 in {
// -- 32x4 form --
def VEXTRACTI32x4rr : AVX512AIi8<0x39, MRMDestReg, (outs VR128X:$dst),
          (ins VR512:$src1, i8imm:$src2),
          "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512;
// Like the 64x4 form below, the patternless store form needs an explicit
// mayStore = 1 inside this hasSideEffects = 0 scope.
let mayStore = 1 in
def VEXTRACTI32x4mr : AVX512AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR512:$src1, i8imm:$src2),
          "vextracti32x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VT4>;

// -- 64x4 form --
def VEXTRACTI64x4rr : AVX512AIi8<0x3b, MRMDestReg, (outs VR256X:$dst),
          (ins VR512:$src1, i8imm:$src2),
          "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512, VEX_W;
let mayStore = 1 in
def VEXTRACTI64x4mr : AVX512AIi8<0x3b, MRMDestMem, (outs),
          (ins i256mem:$dst, VR512:$src1, i8imm:$src2),
          "vextracti64x4\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT4>;
}
269
// Select 128-bit subvector extracts from a 512-bit vector.
def : Pat<(vextract128_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
          (v4f32 (VEXTRACTF32x4rr VR512:$src1,
                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;

// This pattern previously left $src1 untyped and selected the FP-domain
// VEXTRACTF32x4rr; type it v16i32 and use the integer instruction, matching
// the sibling v8i64 pattern below.
def : Pat<(vextract128_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
          (v4i32 (VEXTRACTI32x4rr VR512:$src1,
                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;

def : Pat<(vextract128_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
          (v2f64 (VEXTRACTF32x4rr VR512:$src1,
                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;

def : Pat<(vextract128_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
          (v2i64 (VEXTRACTI32x4rr VR512:$src1,
                  (EXTRACT_get_vextract128_imm VR128X:$ext)))>;
285
286
// Select 256-bit subvector extracts from a 512-bit vector.
def : Pat<(vextract256_extract:$ext (v16f32 VR512:$src1), (iPTR imm)),
          (v8f32 (VEXTRACTF64x4rr VR512:$src1,
                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
def : Pat<(vextract256_extract:$ext (v16i32 VR512:$src1), (iPTR imm)),
          (v8i32 (VEXTRACTI64x4rr VR512:$src1,
                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
def : Pat<(vextract256_extract:$ext (v8f64 VR512:$src1), (iPTR imm)),
          (v4f64 (VEXTRACTF64x4rr VR512:$src1,
                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
def : Pat<(vextract256_extract:$ext (v8i64 VR512:$src1), (iPTR imm)),
          (v4i64 (VEXTRACTI64x4rr VR512:$src1,
                  (EXTRACT_get_vextract256_imm VR256X:$ext)))>;
302
// Extracting the low 256 or 128 bits of a 512-bit vector is just a
// subregister copy; no instruction is needed.

// zmm -> ymm
def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
          (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
          (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
          (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
          (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;

// zmm -> xmm
def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
323
324
// Inserting a 128- or 256-bit subvector at position 0 of an undef 512-bit
// vector is a subregister copy that needs no instruction.

// xmm -> zmm: go through an intermediate 256-bit IMPLICIT_DEF.
def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
              (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
              sub_ymm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
              (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
              sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
              (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
              sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
              (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
              sub_ymm)>;

// ymm -> zmm: a single subregister insert.
def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
352
// vextractps - extract 32 bits from XMM into a GPR or to memory
// (EVEX-encoded form).
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
      (ins VR128X:$src1, u32u8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set GR32:$dst,
        (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>, EVEX;

def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
      (ins f32mem:$dst, VR128X:$src1, u32u8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
              addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
365
366//===---------------------------------------------------------------------===//
367// AVX-512 BROADCAST
368//---
// Register and memory forms of an FP broadcast (vbroadcastss/sd).
// No patterns here; selection happens via the Pats that follow the defms.
multiclass avx512_fp_broadcast<bits<8> opc, string OpcodeStr,
                               RegisterClass DestRC, RegisterClass SrcRC,
                               X86MemOperand x86memop> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DestRC:$dst), (ins SrcRC:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
  def rm : AVX5128I<opc, MRMSrcMem, (outs DestRC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
}
let ExeDomain = SSEPackedSingle in
  defm VBROADCASTSSZ : avx512_fp_broadcast<0x18, "vbroadcastss", VR512,
                                           VR128X, f32mem>,
                       EVEX_V512, EVEX_CD8<32, CD8VT1>;

let ExeDomain = SSEPackedDouble in
  defm VBROADCASTSDZ : avx512_fp_broadcast<0x19, "vbroadcastsd", VR512,
                                           VR128X, f64mem>,
                       EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

// Broadcast of a scalar load.
def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSZrm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDZrm addr:$src)>;

// Intrinsic forms of the same memory broadcasts.
def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
          (VBROADCASTSSZrm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
          (VBROADCASTSDZrm addr:$src)>;
399
// GPR-source integer broadcast: a plain form and a zero-masked form.
multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
                                    RegisterClass SrcRC, RegisterClass KRC> {
  def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
                !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                []>, EVEX, EVEX_V512;
  def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
                (ins KRC:$mask, SrcRC:$src),
                !strconcat(OpcodeStr,
                     " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
                []>, EVEX, EVEX_V512, EVEX_KZ;
}

defm VPBROADCASTDr : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32,
                                              VK16WM>;
defm VPBROADCASTQr : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64,
                                              VK8WM>, VEX_W;

// Zero-extend a mask register into a vector by broadcasting 1 under a
// zeroing mask.
def : Pat<(v16i32 (X86vzext VK16WM:$mask)),
          (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
def : Pat<(v8i64 (X86vzext VK8WM:$mask)),
          (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;

// Plain and masked GPR broadcasts.
def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
          (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
          (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;

// Intrinsic forms.
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;

// Masked intrinsic forms with an all-zeros passthru; the GPR mask is copied
// into the proper mask register class.
def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
                   (v16i32 immAllZerosV), (i16 GR16:$mask))),
          (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
          (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
442
// XMM/memory-source integer broadcast with plain and zero-masked variants.
multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
                                   X86MemOperand x86memop, PatFrag ld_frag,
                                   RegisterClass DstRC, ValueType OpVT,
                                   ValueType SrcVT, RegisterClass KRC> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
                !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                [(set DstRC:$dst,
                  (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
  def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
                (ins KRC:$mask, VR128X:$src),
                !strconcat(OpcodeStr,
                " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                [(set DstRC:$dst,
                  (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
                EVEX, EVEX_KZ;
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                [(set DstRC:$dst,
                  (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
  def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
                (ins KRC:$mask, x86memop:$src),
                !strconcat(OpcodeStr,
                    " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                [(set DstRC:$dst,
                  (OpVT (X86VBroadcastm KRC:$mask,
                         (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
  }
}
471
defm VPBROADCASTDZ : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
                         loadi32, VR512, v16i32, v4i32, VK16WM>,
                     EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VPBROADCASTQZ : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
                         loadi64, VR512, v8i64, v2i64, VK8WM>,
                     EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

// Intrinsic forms of the XMM-source integer broadcasts.
def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
          (VPBROADCASTDZrr VR128X:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
          (VPBROADCASTQZrr VR128X:$src)>;

// FP broadcast of an XMM source.
def : Pat<(v16f32 (X86VBroadcast (v4f32 VR128X:$src))),
          (VBROADCASTSSZrr VR128X:$src)>;
def : Pat<(v8f64 (X86VBroadcast (v2f64 VR128X:$src))),
          (VBROADCASTSDZrr VR128X:$src)>;

def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
          (VBROADCASTSSZrr VR128X:$src)>;
def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
          (VBROADCASTSDZrr VR128X:$src)>;

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
          (VBROADCASTSSZrr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
          (VBROADCASTSDZrr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
500
501
let Predicates = [HasAVX512] in {
// A 256-bit masked broadcast is done with the 512-bit instruction (widening
// the v8i1 mask to VK16WM) and extracting the low YMM subregister.
def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
          (EXTRACT_SUBREG
             (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                      addr:$src)), sub_ymm)>;
}
508//===----------------------------------------------------------------------===//
509// AVX-512 BROADCAST MASK TO VECTOR REGISTER
510//---
511
// Broadcast a mask register into a vector register (vpbroadcastmw2d/mb2q).
// NOTE(review): the OpVT/SrcVT parameters are currently unused by the body —
// presumably kept for future patterns; confirm before removing.
multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
                                 RegisterClass DstRC, RegisterClass KRC,
                                 ValueType OpVT, ValueType SrcVT> {
def rr : AVX512XS8I<opc, MRMDestReg, (outs DstRC:$dst), (ins KRC:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    []>, EVEX;
}

defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d", VR512,
                                             VK16, v16i32, v16i1>, EVEX_V512;
defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q", VR512,
                                             VK8, v8i64, v8i1>,
                                             EVEX_V512, VEX_W;
524
525//===----------------------------------------------------------------------===//
526// AVX-512 - VPERM
527//
528// -- immediate form --
// Immediate-controlled permute (vpermq/vpermpd $imm).
multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                           SDNode OpNode, PatFrag mem_frag,
                           X86MemOperand x86memop, ValueType OpVT> {
  // Register source.
  def ri : AVX512AIi8<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, i8imm:$src2),
               !strconcat(OpcodeStr,
                   " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst,
                 (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>, EVEX;
  // Memory source.
  def mi : AVX512AIi8<opc, MRMSrcMem, (outs RC:$dst),
               (ins x86memop:$src1, i8imm:$src2),
               !strconcat(OpcodeStr,
                   " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst,
                 (OpVT (OpNode (mem_frag addr:$src1),
                        (i8 imm:$src2))))]>, EVEX;
}

defm VPERMQZ  : avx512_perm_imm<0x00, "vpermq", VR512, X86VPermi, memopv8i64,
                        i512mem, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMPDZ : avx512_perm_imm<0x01, "vpermpd", VR512, X86VPermi, memopv8f64,
                        f512mem, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
553
554// -- VPERM - register form --
// Variable permute: index vector in $src1, data in $src2 or memory.
multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       PatFrag mem_frag, X86MemOperand x86memop,
                       ValueType OpVT> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2),
               !strconcat(OpcodeStr,
                   " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst,
                 (OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop:$src2),
               !strconcat(OpcodeStr,
                   " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set RC:$dst,
                 (OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
               EVEX_4V;
}

defm VPERMDZ  : avx512_perm<0x36, "vpermd",  VR512, memopv16i32, i512mem,
                            v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMQZ  : avx512_perm<0x36, "vpermq",  VR512, memopv8i64,  i512mem,
                            v8i64>,  EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
let ExeDomain = SSEPackedSingle in
defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
                            v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
let ExeDomain = SSEPackedDouble in
defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64,  f512mem,
                            v8f64>,  EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
584
// -- VPERM2I - 3 source operands form --
// Two-source permutes (vpermi2*/vpermt2*): $src1 is tied to $dst (read and
// overwritten), so only $src2/$src3 appear in the asm string.
multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
                          PatFrag mem_frag, X86MemOperand x86memop,
                          SDNode OpNode, ValueType OpVT> {
let Constraints = "$src1 = $dst" in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                       " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst,
                     (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
                    EVEX_4V;

  // Memory form: third source comes from memory.
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                    " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst,
                     (OpVT (OpNode RC:$src1, RC:$src2, 
                      (mem_frag addr:$src3))))]>, EVEX_4V;
  }
}
// vpermi2* overwrite the index operand (X86VPermiv3); vpermt2* overwrite a
// table operand (X86VPermv3).
// NOTE(review): the PS/PD forms below use i512mem rather than f512mem for the
// memory operand — verify this is intentional.
defm VPERMI2D  : avx512_perm_3src<0x76, "vpermi2d",  VR512, memopv16i32, i512mem, 
                               X86VPermiv3, v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2Q  : avx512_perm_3src<0x76, "vpermi2q",  VR512, memopv8i64, i512mem, 
                               X86VPermiv3, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps",  VR512, memopv16f32, i512mem, 
                               X86VPermiv3, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd",  VR512, memopv8f64, i512mem, 
                               X86VPermiv3, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPERMT2D  : avx512_perm_3src<0x7E, "vpermt2d",  VR512, memopv16i32, i512mem, 
                               X86VPermv3, v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2Q  : avx512_perm_3src<0x7E, "vpermt2q",  VR512, memopv8i64, i512mem, 
                               X86VPermv3, v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPERMT2PS : avx512_perm_3src<0x7F, "vpermt2ps",  VR512, memopv16f32, i512mem, 
                               X86VPermv3, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPERMT2PD : avx512_perm_3src<0x7F, "vpermt2pd",  VR512, memopv8f64, i512mem, 
                               X86VPermv3, v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
624
// Map the unmasked (mask = all-ones) vpermt2 intrinsics onto the reg-reg
// instructions. The intrinsic takes (idx, src1, src2) while the instruction
// takes (tied-dst/src1, idx, src2), hence the operand reordering.
def : Pat<(v16f32 (int_x86_avx512_mask_vpermt_ps_512 (v16i32 VR512:$idx),
                   (v16f32 VR512:$src1), (v16f32 VR512:$src2), (i16 -1))),
          (VPERMT2PSrr VR512:$src1, VR512:$idx, VR512:$src2)>;

def : Pat<(v16i32 (int_x86_avx512_mask_vpermt_d_512 (v16i32 VR512:$idx),
                   (v16i32 VR512:$src1), (v16i32 VR512:$src2), (i16 -1))),
          (VPERMT2Drr VR512:$src1, VR512:$idx, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_vpermt_pd_512 (v8i64 VR512:$idx),
                   (v8f64 VR512:$src1), (v8f64 VR512:$src2), (i8 -1))),
          (VPERMT2PDrr VR512:$src1, VR512:$idx, VR512:$src2)>;

def : Pat<(v8i64 (int_x86_avx512_mask_vpermt_q_512 (v8i64 VR512:$idx),
                   (v8i64 VR512:$src1), (v8i64 VR512:$src2), (i8 -1))),
          (VPERMT2Qrr VR512:$src1, VR512:$idx, VR512:$src2)>;
640//===----------------------------------------------------------------------===//
641// AVX-512 - BLEND using mask
642//
643multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
644                          RegisterClass KRC, RegisterClass RC,
645                          X86MemOperand x86memop, PatFrag mem_frag,
646                          SDNode OpNode, ValueType vt> {
647  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
648             (ins KRC:$mask, RC:$src1, RC:$src2),
649             !strconcat(OpcodeStr,
650             " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
651             [(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
652                 (vt RC:$src1)))]>, EVEX_4V, EVEX_K;
653  let mayLoad = 1 in
654  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
655             (ins KRC:$mask, RC:$src1, x86memop:$src2),
656             !strconcat(OpcodeStr,
657             " \t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
658             []>, EVEX_4V, EVEX_K;
659}
660
// Floating-point masked blends, plus patterns mapping the unmasked blend
// intrinsics onto them. The GR mask value is moved into the write-mask
// register class (VK16WM/VK8WM) expected by the instruction.
let ExeDomain = SSEPackedSingle in
defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps", 
                              VK16WM, VR512, f512mem,
                              memopv16f32, vselect, v16f32>, 
                              EVEX_CD8<32, CD8VF>, EVEX_V512;
let ExeDomain = SSEPackedDouble in
defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd", 
                              VK8WM, VR512, f512mem,
                              memopv8f64, vselect, v8f64>, 
                              VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;

def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
                 (v16f32 VR512:$src2), (i16 GR16:$mask))),
        (VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
         VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
                 (v8f64 VR512:$src2), (i8 GR8:$mask))),
        (VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
         VR512:$src1, VR512:$src2)>;
681
// Integer masked blends (vpblendmd/vpblendmq).
// NOTE(review): these pass f512mem for the memory operand; i512mem would
// match the integer element types — verify intent.
defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd", 
                              VK16WM, VR512, f512mem, 
                              memopv16i32, vselect, v16i32>, 
                              EVEX_CD8<32, CD8VF>, EVEX_V512;

defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq", 
                              VK8WM, VR512, f512mem, 
                              memopv8i64, vselect, v8i64>, 
                              VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
691
// Map the unmasked (all-ones mask) integer blend intrinsics onto the
// reg-reg instructions. Copy the GR mask into the write-mask register
// classes VK16WM/VK8WM — the operand classes VPBLENDM{D,Q}Zrr were
// instantiated with — matching the floating-point blend patterns above
// (previously these copied into the plain VK16/VK8 classes).
def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
                 (v16i32 VR512:$src2), (i16 GR16:$mask))),
        (VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
         VR512:$src1, VR512:$src2)>;

def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
                 (v8i64 VR512:$src2), (i8 GR8:$mask))),
        (VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
         VR512:$src1, VR512:$src2)>;
701
// Lower 256-bit vselect by widening both values to 512 bits with
// SUBREG_TO_REG, blending under the (VK8->VK16 copied) mask, and extracting
// the low ymm subregister of the result.
let Predicates = [HasAVX512] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
                            (v8f32 VR256X:$src2))),
            (EXTRACT_SUBREG 
              (v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM), 
            (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;

def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
                            (v8i32 VR256X:$src2))),
            (EXTRACT_SUBREG 
                (v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM), 
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
}
717//===----------------------------------------------------------------------===//
718// Compare Instructions
719//===----------------------------------------------------------------------===//
720
721// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
722multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
723                            Operand CC, SDNode OpNode, ValueType VT,
724                            PatFrag ld_frag, string asm, string asm_alt> {
725  def rr : AVX512Ii8<0xC2, MRMSrcReg,
726                (outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
727                [(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
728                IIC_SSE_ALU_F32S_RR>, EVEX_4V;
729  def rm : AVX512Ii8<0xC2, MRMSrcMem,
730                (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
731                [(set VK1:$dst, (OpNode (VT RC:$src1),
732                (ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
733  let isAsmParserOnly = 1, hasSideEffects = 0 in {
734    def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
735               (outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
736               asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
737    def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
738               (outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
739               asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
740  }
741}
742
743let Predicates = [HasAVX512] in {
744defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
745                 "vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
746                 "vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
747                 XS;
748defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
749                 "vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
750                 "vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
751                 XD, VEX_W;
752}
753
// Packed integer equality/greater-than compares writing a mask register.
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, RegisterClass KRC, 
              RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag, 
              SDNode OpNode, ValueType vt> {
  def rr : AVX512BI<opc, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2), 
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))], 
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;
  def rm : AVX512BI<opc, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2), 
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2)))],
             IIC_SSE_ALU_F32P_RM>, EVEX_4V;
}

defm VPCMPEQDZ : avx512_icmp_packed<0x76, "vpcmpeqd", VK16, VR512, i512mem, 
                           memopv16i32, X86pcmpeqm, v16i32>, EVEX_V512,
                           EVEX_CD8<32, CD8VF>;
defm VPCMPEQQZ : avx512_icmp_packed<0x29, "vpcmpeqq", VK8, VR512, i512mem, 
                           memopv8i64, X86pcmpeqm, v8i64>, T8PD, EVEX_V512,
                           VEX_W, EVEX_CD8<64, CD8VF>;

defm VPCMPGTDZ : avx512_icmp_packed<0x66, "vpcmpgtd", VK16, VR512, i512mem, 
                           memopv16i32, X86pcmpgtm, v16i32>, EVEX_V512,
                           EVEX_CD8<32, CD8VF>;
defm VPCMPGTQZ : avx512_icmp_packed<0x37, "vpcmpgtq", VK8, VR512, i512mem, 
                           memopv8i64, X86pcmpgtm, v8i64>, T8PD, EVEX_V512,
                           VEX_W, EVEX_CD8<64, CD8VF>;

// 256-bit compares: widen to 512 bits, compare, then narrow the mask to VK8.
def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
            (COPY_TO_REGCLASS (VPCMPGTDZrr 
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;

def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
            (COPY_TO_REGCLASS (VPCMPEQDZrr 
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
792
// Packed integer compares taking a condition code (vpcmp{d,q,ud,uq}),
// writing a mask register. Alt forms take the cc as an explicit immediate
// and exist only for the assembler.
multiclass avx512_icmp_cc<bits<8> opc, RegisterClass KRC,
              RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag, 
              SDNode OpNode, ValueType vt, Operand CC, string asm,
              string asm_alt> {
  def rri : AVX512AIi8<opc, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
             [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))], 
             IIC_SSE_ALU_F32P_RR>, EVEX_4V;
  def rmi : AVX512AIi8<opc, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
             [(set KRC:$dst, (OpNode (vt RC:$src1), (memop_frag addr:$src2),
                              imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  // Accept explicit immediate argument form instead of comparison code.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512AIi8<opc, MRMSrcReg,
               (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
               asm_alt, [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
    def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
               (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
               asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
  }
}

// Signed compares use opcode 0x1F (X86cmpm); unsigned use 0x1E (X86cmpmu).
defm VPCMPDZ : avx512_icmp_cc<0x1F, VK16, VR512, i512mem, memopv16i32,
                              X86cmpm, v16i32, AVXCC,
              "vpcmp${cc}d\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              "vpcmpd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
              EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPCMPUDZ : avx512_icmp_cc<0x1E, VK16, VR512, i512mem, memopv16i32,
                               X86cmpmu, v16i32, AVXCC,
              "vpcmp${cc}ud\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              "vpcmpud\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
              EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPCMPQZ : avx512_icmp_cc<0x1F, VK8, VR512, i512mem, memopv8i64,
                              X86cmpm, v8i64, AVXCC,
              "vpcmp${cc}q\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              "vpcmpq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
              VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64,
                               X86cmpmu, v8i64, AVXCC,
              "vpcmp${cc}uq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
              "vpcmpuq\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
              VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
837
// avx512_cmp_packed - sse 1 & 2 compare packed instructions
// Packed FP compares writing a mask register: reg-reg, reg-reg with {sae}
// (EVEX.b, no pattern — selected via the intrinsics below), and reg-mem
// forms, plus asm-parser-only aliases taking an explicit immediate cc.
multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
                           X86MemOperand x86memop, ValueType vt,
                           string suffix, Domain d> {
  def rri : AVX512PIi8<0xC2, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
             !strconcat("vcmp${cc}", suffix,
                        " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
  def rrib: AVX512PIi8<0xC2, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
     !strconcat("vcmp${cc}", suffix,
                " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
                [], d>, EVEX_B;
  // NOTE(review): the rmi asm string carries a trailing ", $cc" unlike rri —
  // verify whether that is intentional.
  def rmi : AVX512PIi8<0xC2, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
              !strconcat("vcmp${cc}", suffix,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
             [(set KRC:$dst,
              (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;

  // Accept explicit immediate argument form instead of comparison code.
  // The destination is a mask register here as well: the alt forms
  // previously declared (outs RC:$dst), which would make the assembler
  // expect a vector register destination, inconsistent with rri/rmi.
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
    def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
               (outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
              !strconcat("vcmp", suffix,
                        " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
    def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
               (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
              !strconcat("vcmp", suffix,
                        " \t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
  }
}
871
// 512-bit packed FP compares and the patterns mapping their intrinsics.
defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
               "ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
               EVEX_CD8<32, CD8VF>;
defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
               "pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
               EVEX_CD8<64, CD8VF>;

// 256-bit X86cmpm/X86cmpmu: widen to 512, compare, narrow the mask to VK8.
def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
          (COPY_TO_REGCLASS (VCMPPSZrri
            (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            imm:$cc), VK8)>;
def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
          (COPY_TO_REGCLASS (VPCMPDZrri
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            imm:$cc), VK8)>;
def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
          (COPY_TO_REGCLASS (VPCMPUDZrri
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
            (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
            imm:$cc), VK8)>;

// Unmasked cmp_ps/cmp_pd intrinsics: FROUND_NO_EXC selects the {sae} (rrib)
// form; FROUND_CURRENT selects the plain rri form. The mask result is moved
// back to a GPR.
def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
                (v16f32 VR512:$src2), imm:$cc, (i16 -1),
                 FROUND_NO_EXC)),
          (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
                             (I8Imm imm:$cc)), GR16)>;
           
def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
                (v8f64 VR512:$src2), imm:$cc, (i8 -1),
                 FROUND_NO_EXC)),
          (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
                             (I8Imm imm:$cc)), GR8)>;

def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
                (v16f32 VR512:$src2), imm:$cc, (i16 -1),
                FROUND_CURRENT)),
          (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
                             (I8Imm imm:$cc)), GR16)>;

def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
                (v8f64 VR512:$src2), imm:$cc, (i8 -1),
                 FROUND_CURRENT)),
          (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
                             (I8Imm imm:$cc)), GR8)>;
918
// Mask register copy, including
// - copy between mask registers
// - load/store mask registers
// - copy from GPR to mask register and vice versa
//
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
                         string OpcodeStr, RegisterClass KRC,
                         ValueType vt, X86MemOperand x86memop> {
  let hasSideEffects = 0 in {
    // Mask-to-mask register copy; no pattern (handled by copy lowering).
    def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
    // Load mask from memory.
    let mayLoad = 1 in
    def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
               [(set KRC:$dst, (vt (load addr:$src)))]>;
    // Store mask to memory; selected through the explicit store patterns below.
    let mayStore = 1 in
    def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
  }
}

// GPR <-> mask register moves; no patterns, selected explicitly below.
multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
                             string OpcodeStr,
                             RegisterClass KRC, RegisterClass GRC> {
  let hasSideEffects = 0 in {
    def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
    def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"), []>;
  }
}

let Predicates = [HasAVX512] in {
  defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
               VEX, PS;
  defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
               VEX, PS;
}
957
// Conversions between mask registers, GPRs, memory, and i1 scalars, all
// funneled through the 16-bit KMOVW forms (the only width AVX-512F provides).
let Predicates = [HasAVX512] in {
  // GR16 from/to 16-bit mask
  def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
            (KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
  def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
            (EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;

  // Store kreg in memory
  def : Pat<(store (v16i1 VK16:$src), addr:$dst),
            (KMOVWmk addr:$dst, VK16:$src)>;

  // v8i1 stores go through VK16 (kmovb does not exist in AVX-512F).
  def : Pat<(store VK8:$src, addr:$dst),
            (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;

  def : Pat<(i1 (load addr:$src)),
            (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;

  def : Pat<(v8i1 (load addr:$src)),
            (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;

  // trunc-to-i1: isolate bit 0 with AND, then move into a mask register.
  def : Pat<(i1 (trunc (i32 GR32:$src))),
            (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;

  def : Pat<(i1 (trunc (i8 GR8:$src))),
       (COPY_TO_REGCLASS
        (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
       VK1)>;
  // NOTE(review): uses bare $src where the GR8 pattern above writes GR8:$src
  // — resolves the same, but inconsistent.
  def : Pat<(i1 (trunc (i16 GR16:$src))),
       (COPY_TO_REGCLASS
        (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
       VK1)>;
            
  // zext of an i1 mask: move to GPR and mask off all but bit 0.
  def : Pat<(i32 (zext VK1:$src)),
            (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
  def : Pat<(i8 (zext VK1:$src)),
            (EXTRACT_SUBREG
             (AND32ri (KMOVWrk
                       (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
  def : Pat<(i64 (zext VK1:$src)),
            (AND64ri8 (SUBREG_TO_REG (i64 0),
             (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
  def : Pat<(i16 (zext VK1:$src)),
            (EXTRACT_SUBREG
             (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
              sub_16bit)>;
  // scalar_to_vector of an i1 is just a regclass change.
  def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
            (COPY_TO_REGCLASS VK1:$src, VK16)>;
  def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
            (COPY_TO_REGCLASS VK1:$src, VK8)>;
}
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512] in {
  // GR from/to 8-bit mask without native support
  def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
            (COPY_TO_REGCLASS
              (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
              VK8)>;
  def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
            (EXTRACT_SUBREG
              (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
              sub_8bit)>;

  // Extracting element 0 of a mask vector is a regclass change to VK1.
  def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
            (COPY_TO_REGCLASS VK16:$src, VK1)>;
  def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
            (COPY_TO_REGCLASS VK8:$src, VK1)>;

}
1026
// Mask unary operation
// - KNOT
multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
                         RegisterClass KRC, SDPatternOperator OpNode> {
  let Predicates = [HasAVX512] in
    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
               [(set KRC:$dst, (OpNode KRC:$src))]>;
}

// Instantiate the 16-bit ("w") form only — the width AVX-512F supports.
multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
                               SDPatternOperator OpNode> {
  defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
                          VEX, PS;
}

defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;

// Map the knot intrinsic (on i16) through VK16 and back to GR16.
multiclass avx512_mask_unop_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
                (i16 GR16:$src)),
              (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
              (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
}
defm : avx512_mask_unop_int<"knot", "KNOT">;

// xor-with-all-ones is a not; the v8i1 form is promoted through VK16.
def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
def : Pat<(xor VK8:$src1,  (v8i1 immAllOnesV)),
          (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;

// With AVX-512, 8-bit mask is promoted to 16-bit mask.
def : Pat<(not VK8:$src),
          (COPY_TO_REGCLASS
            (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
1062
// Mask binary operation
// - KAND, KANDN, KOR, KXNOR, KXOR
multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
                           RegisterClass KRC, SDPatternOperator OpNode> {
  let Predicates = [HasAVX512] in
    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
               !strconcat(OpcodeStr,
                          " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
}

// 16-bit ("w") instantiation.
multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
                             SDPatternOperator OpNode> {
  defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
                           VEX_4V, VEX_L, PS;
}

// Helper fragments for the non-primitive mask ops.
def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;

// All commutative except KANDN (the inner let overrides the outer one).
let isCommutable = 1 in {
  defm KAND  : avx512_mask_binop_w<0x41, "kand",  and>;
  let isCommutable = 0 in
  defm KANDN : avx512_mask_binop_w<0x42, "kandn", andn>;
  defm KOR   : avx512_mask_binop_w<0x45, "kor",   or>;
  defm KXNOR : avx512_mask_binop_w<0x46, "kxnor", xnor>;
  defm KXOR  : avx512_mask_binop_w<0x47, "kxor",  xor>;
}

// i1 logic ops are promoted through VK16 (no 1-bit mask instructions exist).
def : Pat<(xor VK1:$src1, VK1:$src2),
     (COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
                                (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;

def : Pat<(or VK1:$src1, VK1:$src2),
     (COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
                               (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;

def : Pat<(and VK1:$src1, VK1:$src2),
     (COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
                                (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
1103
// Map the two-operand mask intrinsics (on i16) onto the k-instructions,
// moving through VK16 and back to GR16.
multiclass avx512_mask_binop_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
                (i16 GR16:$src1), (i16 GR16:$src2)),
              (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
              (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
              (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
}

defm : avx512_mask_binop_int<"kand",  "KAND">;
defm : avx512_mask_binop_int<"kandn", "KANDN">;
defm : avx512_mask_binop_int<"kor",   "KOR">;
defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
defm : avx512_mask_binop_int<"kxor",  "KXOR">;

// With AVX-512, 8-bit mask is promoted to 16-bit mask.
// v8i1 binary ops: promote both operands to VK16, run the 16-bit
// instruction, and narrow the result back to VK8.
multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
  let Predicates = [HasAVX512] in
    def : Pat<(OpNode VK8:$src1, VK8:$src2),
              (COPY_TO_REGCLASS
                (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
                      (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
}

defm : avx512_binop_pat<and,  KANDWrr>;
defm : avx512_binop_pat<andn, KANDNWrr>;
defm : avx512_binop_pat<or,   KORWrr>;
defm : avx512_binop_pat<xnor, KXNORWrr>;
defm : avx512_binop_pat<xor,  KXORWrr>;
1133
// Mask unpacking
multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
                           RegisterClass KRC> {
  let Predicates = [HasAVX512] in
    def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
               !strconcat(OpcodeStr,
                          " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
}

multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
  defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
                            VEX_4V, VEX_L, PD;
}

defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
// concat_vectors places its first operand in the low half, which maps to
// kunpckbw's SECOND source — hence $src2/$src1 are swapped below.
def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
          (KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
                  (COPY_TO_REGCLASS VK8:$src1, VK16))>;


// Map the kunpck intrinsic (on i16 operands) onto the instruction.
multiclass avx512_mask_unpck_int<string IntName, string InstName> {
  let Predicates = [HasAVX512] in
    def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
                (i16 GR16:$src1), (i16 GR16:$src2)),
              (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
              (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
              (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
}
defm : avx512_mask_unpck_int<"kunpck",  "KUNPCK">;
1163
// Mask bit testing
// KORTEST: ORs two masks and sets EFLAGS; no register result.
multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                            SDNode OpNode> {
  let Predicates = [HasAVX512], Defs = [EFLAGS] in
    def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
               !strconcat(OpcodeStr, " \t{$src2, $src1|$src1, $src2}"),
               [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
}

multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
                            VEX, PS;
}

defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;

// Compare an i1 against 0 by kortest-ing the promoted mask with itself.
def : Pat<(X86cmp VK1:$src1, (i1 0)),
          (KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
           (COPY_TO_REGCLASS VK1:$src1, VK16))>;
1183
// Mask shift
// KSHIFTL/KSHIFTR by an 8-bit immediate.
multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                             SDNode OpNode> {
  let Predicates = [HasAVX512] in
    def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
                 !strconcat(OpcodeStr,
                            " \t{$imm, $src, $dst|$dst, $src, $imm}"),
                            [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
}

// NOTE(review): opc2 is currently unused — presumably reserved for other
// mask widths; only the 16-bit (opc1) form is instantiated.
multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
                               SDNode OpNode> {
  defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
                             VEX, TAPD, VEX_W;
}

defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
1202
// Mask setting all 0s or 1s
// Pseudo-instruction (opcode 0, no encoding) that materializes the constant
// mask Val into a mask register; expanded later.  Marked rematerializable
// and as cheap as a move so the register allocator can recompute it freely.
multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
  let Predicates = [HasAVX512] in
    let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
      def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
                     [(set KRC:$dst, (VT Val))]>;
}

// Byte (VK8/v8i1) and word (VK16/v16i1) constant-mask pseudos.
multiclass avx512_mask_setop_w<PatFrag Val> {
  defm B : avx512_mask_setop<VK8,   v8i1, Val>;
  defm W : avx512_mask_setop<VK16, v16i1, Val>;
}

// KSET0{B,W}: all-zero mask; KSET1{B,W}: all-ones mask.
defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
1218
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
// Constant v8i1 / i1 masks are built with the 16-bit KSET pseudos and then
// narrowed to the target register class with COPY_TO_REGCLASS.
let Predicates = [HasAVX512] in {
  def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
  def : Pat<(v8i1 immAllOnesV),  (COPY_TO_REGCLASS (KSET1W), VK8)>;
  def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
  // Both 1 and -1 are the all-ones i1; two patterns cover either canonical
  // form the DAG may present.
  def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
  def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
}
// Low half of a v16i1 is a plain regclass copy (bits 0-7 stay in place).
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
          (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;

// Widening a v8i1 into an undef v16i1 is also just a regclass copy.
def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
          (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;

// High half of a v16i1: shift right by 8 first, then take the low byte.
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;

// v8i1 shifts have no 8-bit KSHIFT with base AVX-512: widen to VK16,
// shift with the word form, and narrow the result back to VK8.
def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;

def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
1241//===----------------------------------------------------------------------===//
1242// AVX-512 - Aligned and unaligned load and store
1243//
1244
// EVEX vector load template.  Generates, for one register class RC with
// write-mask class KRC:
//   rr   - plain reg-reg move (no pattern; selection uses rm / copies)
//   rrkz - reg-reg move, zero-masked ({z} in the asm string)
//   rm   - load from memory; the only form with a selection pattern
//   rrk  - reg-reg move merge-masked ($src1 tied to $dst as pass-through)
//   rmk  - masked load, merge semantics (tied pass-through operand)
//   rmkz - masked load, zeroing semantics
// IsReMaterializable lets callers (e.g. unaligned loads) opt out of remat.
multiclass avx512_load<bits<8> opc, RegisterClass RC, RegisterClass KRC,
                            X86MemOperand x86memop, PatFrag ld_frag, 
                            string asm, Domain d,
                            ValueType vt, bit IsReMaterializable = 1> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
              EVEX;
  def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
               !strconcat(asm,
               " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
               [], d>, EVEX, EVEX_KZ;
  }
  // The unmasked load is foldable and (optionally) rematerializable.
  let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
               [(set (vt RC:$dst), (ld_frag addr:$src))], d>, EVEX;
  // Merge-masked forms: $src1 is tied to $dst and supplies the elements
  // where the mask bit is clear.
  let Constraints = "$src1 = $dst",  hasSideEffects = 0 in {
  def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), 
                                     (ins RC:$src1, KRC:$mask, RC:$src2),
              !strconcat(asm, 
              " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
              EVEX, EVEX_K;
  let mayLoad = 1 in
  def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
                                (ins RC:$src1, KRC:$mask, x86memop:$src2),
              !strconcat(asm, 
              " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
               [], d>, EVEX, EVEX_K;
  }
  // Zero-masked load: masked-off elements become zero, so no tied input.
  let mayLoad = 1 in
  def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
                      (ins KRC:$mask, x86memop:$src2),
              !strconcat(asm,
              " \t{$src2, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src2}"),
               [], d>, EVEX, EVEX_KZ;
}
1282
// EVEX vector store template.  The *_alt reg-reg forms exist only for the
// assembler/disassembler (store-direction encoding of the move); they carry
// no patterns.  mr is the real store with a selection pattern; mrk/mrkz are
// the masked store encodings.
// NOTE(review): mrkz emits a "{z}" zero-masked *store* syntax — verify this
// form is actually a valid encoding; zeroing semantics normally apply only
// to register destinations.
multiclass avx512_store<bits<8> opc, RegisterClass RC, RegisterClass KRC,
                            X86MemOperand x86memop, PatFrag store_frag,
                            string asm, Domain d, ValueType vt> {
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
  def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, " \t{$src, $dst|$dst, $src}"), [], d>,
              EVEX;
  let Constraints = "$src1 = $dst" in
  def alt_rrk : AVX512PI<opc, MRMDestReg, (outs  RC:$dst),
                                     (ins RC:$src1, KRC:$mask, RC:$src2),
              !strconcat(asm,
              " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
              EVEX, EVEX_K;
  def alt_rrkz : AVX512PI<opc, MRMDestReg, (outs  RC:$dst),
                                           (ins KRC:$mask, RC:$src),
              !strconcat(asm,
              " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
              [], d>, EVEX, EVEX_KZ;
  }
  let mayStore = 1 in {
  // Unmasked store; the only form with a selection pattern.
  def mr : AVX512PI<opc, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
              !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
               [(store_frag (vt RC:$src), addr:$dst)], d>, EVEX;
  // Masked store: only elements with a set mask bit are written.
  def mrk : AVX512PI<opc, MRMDestMem, (outs),
                                (ins x86memop:$dst, KRC:$mask, RC:$src),
              !strconcat(asm,
              " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
               [], d>, EVEX, EVEX_K;
  def mrkz : AVX512PI<opc, MRMDestMem, (outs),
                      (ins x86memop:$dst, KRC:$mask, RC:$src),
              !strconcat(asm,
              " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
               [], d>, EVEX, EVEX_KZ;
  }
}
1318
// 512-bit FP moves.  Aligned forms (VMOVAPS/VMOVAPD, 0x28/0x29) require
// 64-byte alignment via the aligned load/store fragments; unaligned forms
// (VMOVUPS/VMOVUPD, 0x10/0x11) do not.  Single-precision uses VK16WM masks
// and 32-bit CD8 scaling; double-precision uses VK8WM, VEX.W and 64-bit CD8.
defm VMOVAPSZ : avx512_load<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
                              "vmovaps", SSEPackedSingle, v16f32>,
                avx512_store<0x29, VR512, VK16WM, f512mem, alignedstore512,
                              "vmovaps", SSEPackedSingle, v16f32>,
                               PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VMOVAPDZ : avx512_load<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
                              "vmovapd", SSEPackedDouble, v8f64>,
                avx512_store<0x29, VR512, VK8WM, f512mem, alignedstore512,
                              "vmovapd", SSEPackedDouble, v8f64>,
                              PD, EVEX_V512, VEX_W,
                              EVEX_CD8<64, CD8VF>;
defm VMOVUPSZ : avx512_load<0x10, VR512, VK16WM, f512mem, loadv16f32,
                              "vmovups", SSEPackedSingle, v16f32>,
                avx512_store<0x11, VR512, VK16WM, f512mem, store,
                              "vmovups", SSEPackedSingle, v16f32>,
                              PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
// VMOVUPDZ's load passes IsReMaterializable = 0 (trailing 0 argument),
// unlike the other three.
defm VMOVUPDZ : avx512_load<0x10, VR512, VK8WM, f512mem, loadv8f64,
                              "vmovupd", SSEPackedDouble, v8f64, 0>,
                avx512_store<0x11, VR512, VK8WM, f512mem, store,
                              "vmovupd", SSEPackedDouble, v8f64>,
                               PD, EVEX_V512, VEX_W,
                               EVEX_CD8<64, CD8VF>;
// Masked-load intrinsics with an all-zero pass-through select the zeroing
// ({z}) load forms; the GR8/GR16 mask is first copied into a mask register.
def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
                 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
       (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;

def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
                 (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
       (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;

// Masked-store intrinsics select the masked store (mrk) forms.
def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
          GR16:$mask),
         (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
            VR512:$src)>;
def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
          GR8:$mask),
         (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
            VR512:$src)>;
1357
// 512-bit integer moves.  Aligned VMOVDQA32/64 (PD prefix) use the aligned
// load/store fragments; unaligned VMOVDQU32/64 (XS prefix) use the generic
// load/store fragments.  32-bit element forms take VK16WM masks; 64-bit
// element forms take VK8WM and VEX.W.
defm VMOVDQA32: avx512_load<0x6F, VR512, VK16WM, i512mem, alignedloadv16i32,
                              "vmovdqa32", SSEPackedInt, v16i32>,
                avx512_store<0x7F, VR512, VK16WM, i512mem, alignedstore512,
                              "vmovdqa32", SSEPackedInt, v16i32>,
                               PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VMOVDQA64: avx512_load<0x6F, VR512, VK8WM, i512mem, alignedloadv8i64,
                              "vmovdqa64", SSEPackedInt, v8i64>,
                avx512_store<0x7F, VR512, VK8WM, i512mem, alignedstore512,
                              "vmovdqa64", SSEPackedInt, v8i64>,
                               PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
defm VMOVDQU32: avx512_load<0x6F, VR512, VK16WM, i512mem, load,
                              "vmovdqu32", SSEPackedInt, v16i32>,
                avx512_store<0x7F, VR512, VK16WM, i512mem, store,
                              "vmovdqu32", SSEPackedInt, v16i32>,
                               XS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VMOVDQU64: avx512_load<0x6F, VR512, VK8WM, i512mem, load,
                              "vmovdqu64", SSEPackedInt, v8i64>,
                avx512_store<0x7F, VR512, VK8WM, i512mem, store,
                              "vmovdqu64", SSEPackedInt, v8i64>,
                               XS, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// Integer masked-load intrinsics with a zero pass-through -> zeroing loads.
def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
                 (v16i32 immAllZerosV), GR16:$mask)),
       (VMOVDQU32rmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;

def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
                 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
       (VMOVDQU64rmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;

// Integer masked-store intrinsics -> masked store (mrk) forms.
def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
          GR16:$mask),
         (VMOVDQU32mrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
            VR512:$src)>;
def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
          GR8:$mask),
         (VMOVDQU64mrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
            VR512:$src)>;
1395
// Lower vselect with a mask register into masked register moves.  A zero
// "false" operand selects the zeroing (rrkz) forms; when the zero vector is
// the "true" operand, the mask is inverted with KNOTW first.  General
// two-register vselects use the merge-masked (rrk) forms with the false
// operand as the tied pass-through.
let AddedComplexity = 20 in {
def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
                           (bc_v8i64 (v16i32 immAllZerosV)))),
                  (VMOVDQU64rrkz VK8WM:$mask, VR512:$src)>;

// NOTE(review): the source pattern binds $mask as VK8WM but the output
// refers to VK8:$mask; TableGen resolves the operand by name — confirm the
// class mismatch is intentional.
def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
                  (v8i64 VR512:$src))),
   (VMOVDQU64rrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
                                              VK8), VR512:$src)>;

def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
                           (v16i32 immAllZerosV))),
                  (VMOVDQU32rrkz VK16WM:$mask, VR512:$src)>;

// NOTE(review): KNOTWrr is given a VK16WM operand here (elsewhere VK16) —
// verify the register classes are compatible for the emitter.
def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
                   (v16i32 VR512:$src))),
   (VMOVDQU32rrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
                                              
def : Pat<(v16f32 (vselect VK16WM:$mask, (v16f32 VR512:$src1), 
                           (v16f32 VR512:$src2))),
                  (VMOVUPSZrrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
def : Pat<(v8f64 (vselect VK8WM:$mask, (v8f64 VR512:$src1), 
                           (v8f64 VR512:$src2))),
                  (VMOVUPDZrrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src1), 
                           (v16i32 VR512:$src2))),
                  (VMOVDQU32rrk VR512:$src2, VK16WM:$mask, VR512:$src1)>;
def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src1), 
                           (v8i64 VR512:$src2))),
                  (VMOVDQU64rrk VR512:$src2, VK8WM:$mask, VR512:$src1)>;
}
// Move Int Doubleword to Packed Double Int
//
// GPR <-> XMM moves (EVEX-encoded vmovd/vmovq).  The rr/rm forms build a
// vector via scalar_to_vector; the isCodeGenOnly defs are the same
// encodings retargeted at the scalar FP register classes for bitconverts.
def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
                        EVEX, VEX_LIG;
def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                        IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                        [(set VR128X:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))],
                          IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
// Scalar-FP-class variants: pure bitconverts GR64 <-> FR64, codegen-only so
// they never appear in assembly output tables twice.
let isCodeGenOnly = 1 in {
def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "vmovq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))],
                       IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                         "vmovq\t{$src, $dst|$dst, $src}",
                         [(set GR64:$dst, (bitconvert FR64:$src))],
                         IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
}
// Store the 64-bit pattern of an FR64 directly to memory.
def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                         "vmovq\t{$src, $dst|$dst, $src}",
                         [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                         IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
                         EVEX_CD8<64, CD8VT1>;
1459
// Move Int Doubleword to Single Scalar
//
// Codegen-only vmovd forms targeting FR32X: bitconvert a GR32 (or an i32
// load) into a scalar single register.
let isCodeGenOnly = 1 in {
def VMOVDI2SSZrr  : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set FR32X:$dst, (bitconvert GR32:$src))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG;

def VMOVDI2SSZrm  : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
}
1473
// Move doubleword from xmm register to r/m32
//
// Extract element 0 of a v4i32 to a GR32 or store it to memory (vmovd,
// store direction, opcode 0x7E).
def VMOVPDI2DIZrr  : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
                       "vmovd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
                                        (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
                       EVEX, VEX_LIG;
def VMOVPDI2DIZmr  : AVX512BI<0x7E, MRMDestMem, (outs),
                       (ins i32mem:$dst, VR128X:$src),
                       "vmovd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128X:$src),
                                     (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
                       EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;

// Move quadword from xmm1 register to r/m64
//
// Element 0 of a v2i64 to GR64 (vmovq with REX.W) or to memory (0xD6 store
// form); both require 64-bit mode.
def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
                                                   (iPTR 0)))],
                      IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
                      Requires<[HasAVX512, In64BitMode]>;

def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
                       (ins i64mem:$dst, VR128X:$src),
                       "vmovq\t{$src, $dst|$dst, $src}",
                       [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
                               addr:$dst)], IIC_SSE_MOVDQ>,
                       EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
                       Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
1504
// Move Scalar Single to Double Int
//
// Codegen-only bitconverts FR32X -> GR32 / i32 memory (vmovd, store
// direction).
let isCodeGenOnly = 1 in {
def VMOVSS2DIZrr  : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
                      (ins FR32X:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32X:$src))],
                      IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
def VMOVSS2DIZmr  : AVX512BI<0x7E, MRMDestMem, (outs),
                      (ins i32mem:$dst, FR32X:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
}

// Move Quadword Int to Packed Quadword Int
//
// Load an i64 from memory into the low element of a v2i64.
def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
                      (ins i64mem:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
                      EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
1528
1529//===----------------------------------------------------------------------===//
1530// AVX-512  MOVSS, MOVSD
1531//===----------------------------------------------------------------------===//
1532
// Scalar move template (VMOVSS/VMOVSD, EVEX-encoded).  Generates:
//   rr  - merge RC:$src2 into the low element of VR128X:$src1 (OpNode is
//         X86Movss/X86Movsd)
//   rrk - masked merge form; no pattern, $src1 tied to $dst
//   rm  - scalar load into RC
//   mr  - scalar store from RC
multiclass avx512_move_scalar <string asm, RegisterClass RC, 
                              SDNode OpNode, ValueType vt,
                              X86MemOperand x86memop, PatFrag mem_pat> {
  let hasSideEffects = 0 in {
  def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2), 
              !strconcat(asm, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
                                      (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
  let Constraints = "$src1 = $dst" in
  def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
              (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
              !strconcat(asm,
                " \t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
              [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
  def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
              EVEX, VEX_LIG;
  def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
             !strconcat(asm, " \t{$src, $dst|$dst, $src}"),
             [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
             EVEX, VEX_LIG;
  } //hasSideEffects = 0
}
1558
// VMOVSSZ: f32 scalar moves (XS prefix, 32-bit CD8 tuple).
let ExeDomain = SSEPackedSingle in
defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
                                 loadf32>, XS, EVEX_CD8<32, CD8VT1>;

// VMOVSDZ: f64 scalar moves (XD prefix, VEX.W, 64-bit CD8 tuple).
let ExeDomain = SSEPackedDouble in
defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
                                 loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;

// Scalar select on a 1-bit mask: implemented with the masked rrk move.
// The "false" value goes in the tied VR128X operand; the unmasked source
// slot is IMPLICIT_DEF; the result is copied back to the scalar class.
def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
          (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
           VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;

def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
          (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
           VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
1574
// For the disassembler
// Store-direction (0x11) register forms of movss/movsd.  Codegen-only and
// pattern-free: they exist so the disassembler can decode the reversed
// encoding of the same instruction.
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
  def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
                        (ins VR128X:$src1, FR32X:$src2),
                        "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>,
                        XS, EVEX_4V, VEX_LIG;
  def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
                        (ins VR128X:$src1, FR64X:$src2),
                        "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                        IIC_SSE_MOV_S_RR>,
                        XD, EVEX_4V, VEX_LIG, VEX_W;
}
1588
1589let Predicates = [HasAVX512] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128X then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
            (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
            (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
            (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
            (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;

  // Move low f32 and clear high bits.
  // 256-bit forms: do the zero-merge on the low xmm subregister, then
  // rebuild the ymm value with SUBREG_TO_REG (upper bits implicitly zero).
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSZrr (v4f32 (V_SET0)), 
              (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSZrr (v4i32 (V_SET0)),
                       (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
  }
1612
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                   (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
  }
  // 256-bit zero-extending insert of a scalar register: zero-merge in xmm,
  // then widen with SUBREG_TO_REG.
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
                                            FR32X:$src)), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
                                     FR64X:$src)), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                   (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDZrr (v2f64 (V_SET0)),
                       (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
                       (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;

  // Extract and store.
  // Storing element 0 is just a scalar store of the low lane.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
1677
  // Shuffle with VMOVSS
  // X86Movss/X86Movsd shuffle nodes select the EVEX scalar moves; the second
  // operand is narrowed to the scalar class for the rr form.
  def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
            (VMOVSSZrr (v4i32 VR128X:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
  def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
            (VMOVSSZrr (v4f32 VR128X:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;

  // 256-bit variants
  // Operate on the low xmm lanes and rebuild the ymm with SUBREG_TO_REG.
  def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
              sub_xmm)>;

  // Movlpd/Movlps (replace the low lane) also map onto VMOVSD.
  def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
1728}
1729
// VMOVZPQILo2PQIZ: vmovq that moves the low i64 and zeroes the upper lane
// (selected via X86vzmovl on v2i64); register and 128-bit-memory forms.
let AddedComplexity = 15 in
def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
                                (ins VR128X:$src),
                                "vmovq\t{$src, $dst|$dst, $src}",
                                [(set VR128X:$dst, (v2i64 (X86vzmovl 
                                                   (v2i64 VR128X:$src))))],
                                IIC_SSE_MOVQ_RR>, EVEX, VEX_W;

// NOTE(review): the CD8 tuple here is <8, CD8VT8> (byte granularity) for a
// 64-bit element load — confirm this displacement scaling is intended.
let AddedComplexity = 20 in
def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
                                 (ins i128mem:$src),
                                 "vmovq\t{$src, $dst|$dst, $src}",
                                 [(set VR128X:$dst, (v2i64 (X86vzmovl
                                                     (loadv2i64 addr:$src))))],
                                 IIC_SSE_MOVDQ>, EVEX, VEX_W,
                                 EVEX_CD8<8, CD8VT8>;
1746
let Predicates = [HasAVX512] in {
  // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
  // So X86vzmovl of a scalar_to_vector / scalar load is just the plain
  // movd/movq instruction.
  let AddedComplexity = 20 in {
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
              (VMOVDI2PDIZrm addr:$src)>;
    def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
              (VMOV64toPQIZrr GR64:$src)>;
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
              (VMOVDI2PDIZrr GR32:$src)>;
              
    // Bitcast-of-load variants of the same i32 case.
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
              (VMOVDI2PDIZrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
              (VMOVDI2PDIZrm addr:$src)>;
    // Zero-extending 64-bit moves via VMOVZPQILo2PQIZ.
    def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
            (VMOVZPQILo2PQIZrm addr:$src)>;
    def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
            (VMOVZPQILo2PQIZrr VR128X:$src)>;
    def : Pat<(v2i64 (X86vzload addr:$src)),
            (VMOVZPQILo2PQIZrm addr:$src)>;
  }

  // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                               (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                               (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
}
1777
// Insert a GPR into element 0 of a 512-bit vector whose other elements are
// zero (or undef): a plain movd/movq suffices, widened with SUBREG_TO_REG.
def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
        (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;

def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
        (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;

def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
        (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;

def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
        (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
1789
1790//===----------------------------------------------------------------------===//
1791// AVX-512 - Integer arithmetic
1792//
// Integer binary op with the full set of EVEX variants:
//   rr / rm          - plain register / memory forms (with ISel patterns)
//   rrk / rmk        - merge-masking: $src0 supplies the pass-through
//                      elements and is tied to $dst ("$src0 = $dst")
//   rrkz / rmkz      - zero-masking: masked-off elements become 0
//   rmb(/k/kz)       - EVEX.B broadcast of one scalar memory element to all
//                      lanes, printed with the BrdcstStr suffix (e.g. "{1to16}")
// The masked forms carry AddedComplexity = 30 so the vselect patterns win
// over matching the unmasked op plus a separate blend.
multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, RegisterClass KRC,
                        RegisterClass RC, PatFrag memop_frag,
                        X86MemOperand x86memop, PatFrag scalar_mfrag,
                        X86MemOperand x86scalar_mop, string BrdcstStr,
                        OpndItins itins, bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
    def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
              (ins RC:$src1, RC:$src2),
              !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
              itins.rr>, EVEX_4V;
  let AddedComplexity = 30 in {
    let Constraints = "$src0 = $dst" in
      def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src0, KRC:$mask, RC:$src1, RC:$src2),
                 !strconcat(OpcodeStr,
                    " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
                 [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                  (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
                                  RC:$src0)))],
                 itins.rr>, EVEX_4V, EVEX_K;
    def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, RC:$src2),
                !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
                    "|$dst {${mask}} {z}, $src1, $src2}"),
                [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                  (OpNode (OpVT RC:$src1), (OpVT RC:$src2)),
                                  (OpVT immAllZerosV))))],
                itins.rr>, EVEX_4V, EVEX_KZ;
  }

  let mayLoad = 1 in {
    def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
              (ins RC:$src1, x86memop:$src2),
              !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (memop_frag addr:$src2))))],
              itins.rm>, EVEX_4V;
    let AddedComplexity = 30 in {
    let Constraints = "$src0 = $dst" in
      def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src0, KRC:$mask, RC:$src1, x86memop:$src2),
                 !strconcat(OpcodeStr,
                     " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
                 [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                    (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
                                    RC:$src0)))],
                 itins.rm>, EVEX_4V, EVEX_K;
    def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, x86memop:$src2),
                !strconcat(OpcodeStr,
                    " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
                [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                    (OpNode (OpVT RC:$src1), (memop_frag addr:$src2)),
                                    (OpVT immAllZerosV))))],
                itins.rm>, EVEX_4V, EVEX_KZ;
    }
    // Broadcast forms: the second source is one scalar element repeated
    // across the vector via X86VBroadcast.
    def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86scalar_mop:$src2),
               !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                          ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
               [(set RC:$dst, (OpNode RC:$src1,
                               (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))))],
               itins.rm>, EVEX_4V, EVEX_B;
    let AddedComplexity = 30 in {
    let Constraints = "$src0 = $dst" in
      def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src0, KRC:$mask, RC:$src1, x86scalar_mop:$src2),
                  !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                             ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
                             BrdcstStr, "}"),
                  [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                    (OpNode (OpVT RC:$src1),
                                     (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
                                    RC:$src0)))],
                  itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
    def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
                 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                            ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
                            BrdcstStr, "}"),
                 [(set RC:$dst, (OpVT (vselect KRC:$mask,
                                    (OpNode (OpVT RC:$src1),
                                     (OpVT (X86VBroadcast (scalar_mfrag addr:$src2)))),
                                    (OpVT immAllZerosV))))],
                 itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
    }
  }
}
1882
// Like avx512_binop_rm, but for ops whose destination element type differs
// from the source element type (e.g. vpmuldq: v16i32 sources -> v8i64).
// All pattern lists are empty ([]): these defs provide encodings and asm
// only, and selection is done by explicit Pat<> entries elsewhere (see the
// X86pmuludq / intrinsic patterns after the VPMULUDQZ defm below).
// NOTE(review): unlike avx512_binop_rm, the masked forms here have no tied
// "$src0 = $dst" pass-through operand — presumably acceptable while they
// are assembler-only; confirm if ISel patterns are ever added.
multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
                            ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
                            PatFrag memop_frag, X86MemOperand x86memop,
                            PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
                            string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
  {
    def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       []>, EVEX_4V;
    def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
               (ins KRC:$mask, RC:$src1, RC:$src2),
               !strconcat(OpcodeStr,
                  " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
               [], itins.rr>, EVEX_4V, EVEX_K;
    def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, RC:$src2),
                !strconcat(OpcodeStr, " \t{$src2, $src1, $dst {${mask}} {z}" ,
                    "|$dst {${mask}} {z}, $src1, $src2}"),
                [], itins.rr>, EVEX_4V, EVEX_KZ;
  }
  let mayLoad = 1 in {
    def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
              (ins RC:$src1, x86memop:$src2),
              !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              []>, EVEX_4V;
    def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, RC:$src1, x86memop:$src2),
               !strconcat(OpcodeStr,
                   " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
               [], itins.rm>, EVEX_4V, EVEX_K;
    def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, x86memop:$src2),
                !strconcat(OpcodeStr,
                    " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
                [], itins.rm>, EVEX_4V, EVEX_KZ;
    // Broadcast (EVEX.B) memory forms, printed with BrdcstStr (e.g. "{1to8}").
    def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86scalar_mop:$src2),
               !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                          ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
               [], itins.rm>, EVEX_4V, EVEX_B;
    def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
                !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                           ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
                           BrdcstStr, "}"),
                [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
    def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
                 !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                            ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
                            BrdcstStr, "}"),
                 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
  }
}
1939
// 512-bit integer add/sub/mul. The trailing 1/0 argument is IsCommutable
// (add/mul commute, sub does not). Dword forms broadcast with {1to16} and
// scale disp8 by 32-bit elements; qword forms use {1to8}, VEX_W and 64-bit
// disp8 scaling (EVEX_CD8<64, CD8VF>).
defm VPADDDZ : avx512_binop_rm<0xFE, "vpaddd", add, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 1>, EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPSUBDZ : avx512_binop_rm<0xFA, "vpsubd", sub, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPMULLDZ : avx512_binop_rm<0x40, "vpmulld", mul, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPADDQZ : avx512_binop_rm<0xD4, "vpaddq", add, v8i64, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 1>, EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_W;

defm VPSUBQZ : avx512_binop_rm<0xFB, "vpsubq", sub, v8i64, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
1959
// Widening multiplies: even 32-bit lanes of each source multiplied to v8i64.
// avx512_binop_rm2 emits encodings only; the Pat<> entries below select the
// rr forms for the X86pmuludq node and the unmasked (all-ones mask,
// zero pass-through) forms of the mask intrinsics.
defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
                   EVEX_CD8<64, CD8VF>, VEX_W;

defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;

def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
          (VPMULUDQZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
           (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
          (VPMULUDQZrr VR512:$src1, VR512:$src2)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
           (v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
          (VPMULDQZrr VR512:$src1, VR512:$src2)>;
1978
// Packed signed/unsigned min/max.
// NOTE(review): the dword forms pass IsCommutable = 1 but the qword forms
// pass 0, even though min/max commute for both widths — looks like an
// inconsistency worth confirming; left as-is since it only affects
// commutation opportunities, not correctness.
defm VPMAXUDZ : avx512_binop_rm<0x3F, "vpmaxud", X86umax, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 1>,
                   T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPMAXUQZ : avx512_binop_rm<0x3F, "vpmaxuq", X86umax, v8i64, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 0>,
                   T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPMAXSDZ : avx512_binop_rm<0x3D, "vpmaxsd", X86smax, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 1>,
                   T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPMAXSQZ : avx512_binop_rm<0x3D, "vpmaxsq", X86smax, v8i64, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 0>,
                   T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPMINUDZ : avx512_binop_rm<0x3B, "vpminud", X86umin, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 1>,
                   T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPMINUQZ : avx512_binop_rm<0x3B, "vpminuq", X86umin, v8i64, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 0>,
                   T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

defm VPMINSDZ : avx512_binop_rm<0x39, "vpminsd", X86smin, v16i32, VK16WM, VR512,
                   memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                   SSE_INTALU_ITINS_P, 1>,
                   T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPMINSQZ : avx512_binop_rm<0x39, "vpminsq", X86smin, v8i64, VK8WM, VR512,
                   memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                   SSE_INTALU_ITINS_P, 0>,
                   T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2014
// Map the masked min/max intrinsics, when called unmasked (mask = all ones,
// pass-through = zero vector), directly onto the plain rr instructions.
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINSDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
                    (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
           (VPMINUDZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINSQZrr VR512:$src1, VR512:$src2)>;
def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
                (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
           (VPMINUQZrr VR512:$src1, VR512:$src2)>;
2039//===----------------------------------------------------------------------===//
2040// AVX-512 - Unpack Instructions
2041//===----------------------------------------------------------------------===//
2042
// FP unpack (unpckh/unpckl): register-register and register-memory forms.
// The memory operand is bitconverted to the instruction's vector type, so
// the mem_frag element type need not match vt exactly.
multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
    def rr : AVX512PI<opc, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src2),
                asm, [(set RC:$dst,
                           (vt (OpNode RC:$src1, RC:$src2)))],
                           d>, EVEX_4V;
    def rm : AVX512PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                asm, [(set RC:$dst,
                       (vt (OpNode RC:$src1,
                            (bitconvert (mem_frag addr:$src2)))))],
                        d>, EVEX_4V;
}
2059
// 512-bit FP unpack high/low, single and double precision.
// NOTE(review): the ps forms also pass memopv8f64 as the memory fragment;
// this works because avx512_unpack_fp bitconverts the load to v16f32, but
// memopv16f32 would be the more natural choice — confirm intent.
defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
      VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
      VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
      VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
      VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2072
// Integer unpack (punpckh*/punpckl*): rr and rm forms; the memory operand
// is bitconverted to OpVT before being fed to the unpack node.
multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                        X86MemOperand x86memop> {
  def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))], 
       IIC_SSE_UNPCK>, EVEX_4V;
  def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
                                     (bitconvert (memop_frag addr:$src2)))))],
                                     IIC_SSE_UNPCK>, EVEX_4V;
}
// 512-bit integer unpack low/high for dword and qword elements; qword forms
// take VEX_W and 64-bit disp8 scaling.
defm VPUNPCKLDQZ  : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
                                VR512, memopv16i32, i512mem>, EVEX_V512,
                                EVEX_CD8<32, CD8VF>;
defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
                                VR512, memopv8i64, i512mem>, EVEX_V512,
                                VEX_W, EVEX_CD8<64, CD8VF>;
defm VPUNPCKHDQZ  : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
                                VR512, memopv16i32, i512mem>, EVEX_V512,
                                EVEX_CD8<32, CD8VF>;
defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
                                VR512, memopv8i64, i512mem>, EVEX_V512,
                                VEX_W, EVEX_CD8<64, CD8VF>;
2100//===----------------------------------------------------------------------===//
2101// AVX-512 - PSHUFD
2102//
2103
// Shuffle-by-immediate (pshufd/vpermilps/vpermilpd style): one register
// source or one memory source, plus an 8-bit immediate control.
multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                         SDNode OpNode, PatFrag mem_frag, 
                         X86MemOperand x86memop, ValueType OpVT> {
  def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
                     EVEX;
  // Memory form: the shuffle is applied directly to the loaded vector.
  def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
                     (ins x86memop:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                       (OpVT (OpNode (mem_frag addr:$src1),
                              (i8 imm:$src2))))]>, EVEX;
}
2122
// VPSHUFD zmm: dword shuffle by immediate, 32-bit disp8 scaling.
defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
                      i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;

// VPERMILPS zmm, zmm, imm8: in-lane single-precision permute.
let ExeDomain = SSEPackedSingle in
defm VPERMILPSZ : avx512_pshuf_imm<0x04, "vpermilps", VR512, X86VPermilp,
                      memopv16f32, i512mem, v16f32>, TAPD, EVEX_V512,
                      EVEX_CD8<32, CD8VF>;
// VPERMILPD zmm, zmm, imm8: in-lane double-precision permute.
// Fix: this instruction operates on 64-bit elements (note VEX_W), so the
// EVEX compressed-displacement (disp8*N) scaling must be EVEX_CD8<64, CD8VF>,
// matching every other 64-bit-element defm in this file (e.g. VPADDQZ,
// VUNPCKHPDZ). The previous EVEX_CD8<32, CD8VF> mis-scaled disp8 for the
// memory form, producing wrong displacement encodings.
let ExeDomain = SSEPackedDouble in
defm VPERMILPDZ : avx512_pshuf_imm<0x05, "vpermilpd", VR512, X86VPermilp,
                      memopv8f64, i512mem, v8f64>, TAPD, EVEX_V512,
                      VEX_W, EVEX_CD8<64, CD8VF>;
2134
// Integer-typed permilp nodes reuse the FP instructions (same lane movement,
// only the execution domain differs).
def : Pat<(v16i32 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
          (VPERMILPSZri VR512:$src1, imm:$imm)>;
def : Pat<(v8i64 (X86VPermilp VR512:$src1, (i8 imm:$imm))),
          (VPERMILPDZri VR512:$src1, imm:$imm)>;
2139
2140//===----------------------------------------------------------------------===//
2141// AVX-512  Logical Instructions
2142//===----------------------------------------------------------------------===//
2143
// Bitwise logic. AVX-512 provides separate d/q element-size variants of
// each op purely so that masking and embedded broadcast get the right
// element granularity; all are commutable except andn.
defm VPANDDZ : avx512_binop_rm<0xDB, "vpandd", and, v16i32, VK16WM, VR512, memopv16i32,
                      i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
                      EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPANDQZ : avx512_binop_rm<0xDB, "vpandq", and, v8i64, VK8WM, VR512, memopv8i64,
                      i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
                      EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPORDZ  : avx512_binop_rm<0xEB, "vpord", or, v16i32, VK16WM, VR512, memopv16i32,
                      i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
                      EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPORQZ  : avx512_binop_rm<0xEB, "vporq", or, v8i64, VK8WM, VR512, memopv8i64,
                      i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
                      EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VPXORDZ : avx512_binop_rm<0xEF, "vpxord", xor, v16i32, VK16WM, VR512, memopv16i32,
                      i512mem, loadi32, i32mem, "{1to16}", SSE_BIT_ITINS_P, 1>,
                      EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPXORQZ : avx512_binop_rm<0xEF, "vpxorq", xor, v8i64, VK8WM, VR512, memopv8i64,
                      i512mem, loadi64, i64mem, "{1to8}", SSE_BIT_ITINS_P, 1>,
                      EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// andnot (X86andnp) is not commutable: it complements only its first operand.
defm VPANDNDZ : avx512_binop_rm<0xDF, "vpandnd", X86andnp, v16i32, VK16WM, VR512,
                      memopv16i32, i512mem, loadi32, i32mem, "{1to16}",
                      SSE_BIT_ITINS_P, 0>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPANDNQZ : avx512_binop_rm<0xDF, "vpandnq", X86andnp, v8i64, VK8WM, VR512,
                      memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
                      SSE_BIT_ITINS_P, 0>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
2168
2169//===----------------------------------------------------------------------===//
2170// AVX-512  FP arithmetic
2171//===----------------------------------------------------------------------===//
2172
// Scalar FP binary op: instantiates the shared SSE scalar multiclass with
// the AVX-512 scalar register classes (FR32X/FR64X) and EVEX encoding.
// The "ss"/"sd" suffix is appended to the mnemonic here.
multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  SizeItins itins> {
  defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
                             f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
                             EVEX_CD8<32, CD8VT1>;
  defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
                             f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
                             EVEX_CD8<64, CD8VT1>;
}
2182
// Scalar add/mul/min/max are marked commutable; sub/div are not.
// (min/max commute as machine ops here via the X86fmin/X86fmax nodes.)
let isCommutable = 1 in {
defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
}
let isCommutable = 0 in {
defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
}
2193
// Packed FP binary op with EVEX variants:
//   rr / rm      - plain forms with ISel patterns
//   rmb          - embedded-broadcast memory form (EVEX.B), with pattern
//   rrk/rrkz/rmk/rmkz/rmbk/rmbkz - masked forms, assembler-only ([] patterns)
// NOTE(review): the rmk/rmkz asm strings start with "\t" while every other
// form uses " \t" — looks like a missing space; confirm against the asm
// matcher before changing.
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass KRC,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
                           string BrdcstStr,
                           Domain d, OpndItins itins, bit commutable> {
  let isCommutable = commutable in {
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
       EVEX_4V;

    def rrk: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
       !strconcat(OpcodeStr,
           " \t{$src2, $src1, $dst {${mask}} |$dst {${mask}}, $src1, $src2}"),
       [], itins.rr, d>, EVEX_4V, EVEX_K;

    def rrkz: PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src1, RC:$src2),
       !strconcat(OpcodeStr,
           " \t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
       [], itins.rr, d>, EVEX_4V, EVEX_KZ;
  }

  let mayLoad = 1 in {
    def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
          itins.rm, d>, EVEX_4V;

    // Broadcast one scalar memory element to every lane (printed with
    // BrdcstStr, e.g. "{1to16}").
    def rmb : PI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86scalar_mop:$src2),
       !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
           ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
       [(set RC:$dst, (OpNode RC:$src1, 
                       (vt (X86VBroadcast (scalar_mfrag addr:$src2)))))],
       itins.rm, d>, EVEX_4V, EVEX_B;

    def rmk : PI<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
           "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
       [], itins.rm, d>, EVEX_4V, EVEX_K;

    def rmkz : PI<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, x86memop:$src2), !strconcat(OpcodeStr,
           "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
       [], itins.rm, d>, EVEX_4V, EVEX_KZ;

    def rmbk : PI<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
           " \t{${src2}", BrdcstStr,
           ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}", BrdcstStr, "}"),
       [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_K;

    def rmbkz : PI<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2), !strconcat(OpcodeStr,
           " \t{${src2}", BrdcstStr,
           ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
           BrdcstStr, "}"),
       [], itins.rm, d>, EVEX_4V, EVEX_B, EVEX_KZ;
  }
}
2256
// 512-bit packed FP arithmetic. add/mul/min/max are commutable (trailing 1);
// sub/div are not (trailing 0). ps forms use PS prefix and 32-bit disp8
// scaling; pd forms use PD + VEX_W and 64-bit scaling.
defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VK16WM, VR512, v16f32, f512mem,
                   memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle, 
                   SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
                   
defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VK8WM, VR512, v8f64, f512mem,
                   memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
                   SSE_ALU_ITINS_P.d, 1>,
                   EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VK16WM, VR512, v16f32, f512mem,
                   memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
                   SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VK8WM, VR512, v8f64, f512mem,
                   memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
                   SSE_ALU_ITINS_P.d, 1>,
                   EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VK16WM, VR512, v16f32, f512mem,
                   memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
                   SSE_ALU_ITINS_P.s, 1>,
                   EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VK16WM, VR512, v16f32, f512mem,
                   memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
                   SSE_ALU_ITINS_P.s, 1>,
                   EVEX_V512, PS, EVEX_CD8<32, CD8VF>;

defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VK8WM, VR512, v8f64, f512mem,
                   memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
                   SSE_ALU_ITINS_P.d, 1>,
                   EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VK8WM, VR512, v8f64, f512mem,
                   memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
                   SSE_ALU_ITINS_P.d, 1>,
                   EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VK16WM, VR512, v16f32, f512mem,
                   memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
                   SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VK16WM, VR512, v16f32, f512mem,
                   memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
                   SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;

defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VK8WM, VR512, v8f64, f512mem,
                   memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
                   SSE_ALU_ITINS_P.d, 0>, 
                   EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
defm VDIVPDZ : avx512_fp_packed<0x5E, "divpd", fdiv, VK8WM, VR512, v8f64, f512mem,
                   memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
                   SSE_ALU_ITINS_P.d, 0>, 
                   EVEX_V512, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2307
// Unmasked uses of the masked min/max intrinsics (all-ones mask, zero
// pass-through, current rounding mode) map onto the plain rr instructions.
def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
                   (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
                   (i16 -1), FROUND_CURRENT)),
          (VMAXPSZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
                   (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
                   (i8 -1), FROUND_CURRENT)),
          (VMAXPDZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
                   (v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
                   (i16 -1), FROUND_CURRENT)),
          (VMINPSZrr VR512:$src1, VR512:$src2)>;

def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
                   (v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
                   (i8 -1), FROUND_CURRENT)),
          (VMINPDZrr VR512:$src1, VR512:$src2)>;
2327//===----------------------------------------------------------------------===//
2328// AVX-512  VPTESTM instructions
2329//===----------------------------------------------------------------------===//
2330
// VPTESTM-style ops: compare two vector sources element-wise via OpNode and
// write the per-element results into a mask register (KRC), not a vector.
multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC, 
              RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag, 
              SDNode OpNode, ValueType vt> {
  def rr : AVX512PI<opc, MRMSrcReg,
             (outs KRC:$dst), (ins RC:$src1, RC:$src2), 
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
             SSEPackedInt>, EVEX_4V;
  def rm : AVX512PI<opc, MRMSrcMem,
             (outs KRC:$dst), (ins RC:$src1, x86memop:$src2), 
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set KRC:$dst, (OpNode (vt RC:$src1), 
              (bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
}
2345
// VPTESTM D/Q: test-and-set-mask over 16 x i32 / 8 x i64.
// NOTE(review): the memory operand class is f512mem for these integer
// instructions — presumably harmless since the size matches, but i512mem
// would match the element type; verify before changing.
defm VPTESTMDZ  : avx512_vptest<0x27, "vptestmd", VK16, VR512,  f512mem,
                              memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
                              EVEX_CD8<32, CD8VF>;
defm VPTESTMQZ  : avx512_vptest<0x27, "vptestmq", VK8, VR512,  f512mem,
                              memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
                              EVEX_CD8<64, CD8VF>;

// The negated forms (VPTESTNM) require the CDI feature.
let Predicates = [HasCDI] in {
defm VPTESTNMDZ  : avx512_vptest<0x27, "vptestnmd", VK16, VR512,  f512mem,
                              memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
                              EVEX_CD8<32, CD8VF>;
defm VPTESTNMQZ  : avx512_vptest<0x27, "vptestnmq", VK8, VR512,  f512mem,
                              memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
                              EVEX_CD8<64, CD8VF>;
}
2361
// Select the full-mask (-1) ptestm intrinsics directly to VPTESTM and copy
// the resulting mask register into the matching GPR class (k16 -> GR16,
// k8 -> GR8) to produce the intrinsic's integer result.
def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
                 (v16i32 VR512:$src2), (i16 -1))),
                 (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;

def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
                 (v8i64 VR512:$src2), (i8 -1))),
                 (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
2369//===----------------------------------------------------------------------===//
2370// AVX-512  Shift instructions
2371//===----------------------------------------------------------------------===//
// Shift by immediate.  Produces four records per instantiation:
//   ri  - register source, imm8 count (with ISel pattern)
//   rik - masked (EVEX_K) register form; empty pattern, assembler only
//   mi  - memory source, imm8 count (with ISel pattern)
//   mik - masked (EVEX_K) memory form; empty pattern, assembler only
multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
                         string OpcodeStr, SDNode OpNode, RegisterClass RC,
                         ValueType vt, X86MemOperand x86memop, PatFrag mem_frag,
                         RegisterClass KRC> {
  def ri : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
       (ins RC:$src1, i8imm:$src2),
           !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (vt (OpNode RC:$src1, (i8 imm:$src2))))],
        SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
  def rik : AVX512BIi8<opc, ImmFormR, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, i8imm:$src2),
           !strconcat(OpcodeStr,
                " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
       [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
  def mi: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
       (ins x86memop:$src1, i8imm:$src2),
           !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (OpNode (mem_frag addr:$src1),
                     (i8 imm:$src2)))], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
  def mik: AVX512BIi8<opc, ImmFormM, (outs RC:$dst),
       (ins KRC:$mask, x86memop:$src1, i8imm:$src2),
           !strconcat(OpcodeStr,
                " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
       [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
}
2397
// Shift by a count held in an XMM register (low element).  The count
// operand ($src2) is always 128-bit regardless of the vector width.
//   rr/rm   - unmasked forms with ISel patterns
//   rrk/rmk - masked (EVEX_K) forms; empty patterns, assembler only
multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          RegisterClass RC, ValueType vt, ValueType SrcVT,
                          PatFrag bc_frag, RegisterClass KRC> {
  // src2 is always 128-bit
  def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, VR128X:$src2),
           !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
        SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
  def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, VR128X:$src2),
           !strconcat(OpcodeStr,
                " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
       [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
  def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, i128mem:$src2),
           !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       [(set RC:$dst, (vt (OpNode RC:$src1,
                       (bc_frag (memopv2i64 addr:$src2)))))],
                        SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
  def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, i128mem:$src2),
           !strconcat(OpcodeStr,
                " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
       [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
}
2424
// 512-bit logical/arithmetic shifts.  Each mnemonic is instantiated twice
// under the same defm prefix: once from avx512_shift_rmi (immediate count,
// record suffixes ri/rik/mi/mik) and once from avx512_shift_rrm (XMM count,
// suffixes rr/rrk/rm/rmk) — the suffix sets are disjoint, so the generated
// record names do not collide.  The immediate forms use CD8VF compressed
// displacement (full vector memory operand); the XMM-count forms use CD8VQ
// (128-bit memory operand).
defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
                           VR512, v16i32, i512mem, memopv16i32, VK16WM>,
                           EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
                           VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
                           EVEX_CD8<32, CD8VQ>;
                           
defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
                           VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
                           EVEX_CD8<64, CD8VF>, VEX_W;
defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
                           VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
                           EVEX_CD8<64, CD8VQ>, VEX_W;

defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
                           VR512, v16i32, i512mem, memopv16i32, VK16WM>, EVEX_V512,
                           EVEX_CD8<32, CD8VF>;
defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
                           VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
                           EVEX_CD8<32, CD8VQ>;
                           
defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
                           VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
                           EVEX_CD8<64, CD8VF>, VEX_W;
defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
                           VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
                           EVEX_CD8<64, CD8VQ>, VEX_W;

defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
                           VR512, v16i32, i512mem, memopv16i32, VK16WM>,
                           EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
                           VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
                           EVEX_CD8<32, CD8VQ>;
                           
// VPSRAQ is new in AVX-512 (no SSE/AVX predecessor for 64-bit arithmetic
// shift right).
defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
                           VR512, v8i64, i512mem, memopv8i64, VK8WM>, EVEX_V512,
                           EVEX_CD8<64, CD8VF>, VEX_W;
defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
                           VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
                           EVEX_CD8<64, CD8VQ>, VEX_W;
2466
2467//===-------------------------------------------------------------------===//
2468// Variable Bit Shifts
2469//===-------------------------------------------------------------------===//
// Variable (per-element) shifts: each destination element is shifted by the
// count in the corresponding element of $src2.  Selected from the generic
// shl/srl/sra SDNodes applied to whole vectors.
multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag> {
  def rr  : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (OpNode RC:$src1, (vt RC:$src2))))]>,
             EVEX_4V;
  def rm  : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
             EVEX_4V;
}
2486
// 512-bit variable-shift instantiations: VPSLLV/VPSRLV/VPSRAV over
// 16 x i32 and 8 x i64 (64-bit forms carry VEX_W).
defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32, 
                               i512mem, memopv16i32>, EVEX_V512,
                               EVEX_CD8<32, CD8VF>;
defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64, 
                               i512mem, memopv8i64>, EVEX_V512, VEX_W,
                               EVEX_CD8<64, CD8VF>;
defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32, 
                               i512mem, memopv16i32>, EVEX_V512,
                               EVEX_CD8<32, CD8VF>;
defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64, 
                               i512mem, memopv8i64>, EVEX_V512, VEX_W,
                               EVEX_CD8<64, CD8VF>;
defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32, 
                               i512mem, memopv16i32>, EVEX_V512,
                               EVEX_CD8<32, CD8VF>;
defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64, 
                               i512mem, memopv8i64>, EVEX_V512, VEX_W,
                               EVEX_CD8<64, CD8VF>;
2505
2506//===----------------------------------------------------------------------===//
2507// AVX-512 - MOVDDUP
2508//===----------------------------------------------------------------------===//
2509
// VMOVDDUP: duplicate even-indexed double elements (X86Movddup node).
multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT, 
                        X86MemOperand x86memop, PatFrag memop_frag> {
def rr  : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    [(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
def rm  : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                    [(set RC:$dst,
                      (VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
}

defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
                 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
// Fold a scalar f64 load feeding a movddup shuffle into the memory form.
def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPZrm addr:$src)>;
2525
2526//===---------------------------------------------------------------------===//
2527// Replicate Single FP - MOVSHDUP and MOVSLDUP
2528//===---------------------------------------------------------------------===//
// MOVSHDUP/MOVSLDUP: replicate odd/even single-precision elements.
multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
  def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                      [(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
  let mayLoad = 1 in
  def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                      [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
}

defm VMOVSHDUPZ  : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                       v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
                       EVEX_CD8<32, CD8VF>;
defm VMOVSLDUPZ  : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                       v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
                       EVEX_CD8<32, CD8VF>;

// Also select the integer-typed forms of the same shuffles onto the FP
// instructions (the shuffle is bit-pattern only).
def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
           (VMOVSHDUPZrm addr:$src)>;
def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
           (VMOVSLDUPZrm addr:$src)>;
2554
2555//===----------------------------------------------------------------------===//
2556// Move Low to High and High to Low packed FP Instructions
2557//===----------------------------------------------------------------------===//
// VMOVLHPS/VMOVHLPS (EVEX-encoded 128-bit forms on VR128X).
def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
          (ins VR128X:$src1, VR128X:$src2),
          "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
           IIC_SSE_MOV_LH>, EVEX_4V;
def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
          (ins VR128X:$src1, VR128X:$src2),
          "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
          IIC_SSE_MOV_LH>, EVEX_4V;

// Integer-typed shuffle nodes map onto the same FP instructions.
let Predicates = [HasAVX512] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
            (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
            (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
            (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
}
2580
2581//===----------------------------------------------------------------------===//
2582// FMA - Fused Multiply Operations
2583//
// Packed FMA3, 213 operand order.  $src1 is tied to $dst (FMA3 overwrites
// one source).  Three forms:
//   r  - all-register
//   m  - third operand from memory
//   mb - third operand broadcast from a scalar memory location (EVEX_B)
let Constraints = "$src1 = $dst" in {
multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr,
            RegisterClass RC, X86MemOperand x86memop,
            PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
            string BrdcstStr, SDNode OpNode, ValueType OpVT> {
  def r: AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
          (ins RC:$src1, RC:$src2, RC:$src3),
          !strconcat(OpcodeStr," \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          [(set RC:$dst, (OpVT(OpNode RC:$src1, RC:$src2, RC:$src3)))]>;

  let mayLoad = 1 in
  def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
          (ins RC:$src1, RC:$src2, x86memop:$src3),
          !strconcat(OpcodeStr, " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2,
                                               (mem_frag addr:$src3))))]>;
   def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, x86scalar_mop:$src3),
           !strconcat(OpcodeStr, " \t{${src3}", BrdcstStr,
            ", $src2, $dst|$dst, $src2, ${src3}", BrdcstStr, "}"),
           [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
           (OpVT (X86VBroadcast (scalar_mfrag addr:$src3)))))]>, EVEX_B;
}
} // Constraints = "$src1 = $dst"
2608
// 213-form packed FMA instantiations: PS variants over v16f32, PD variants
// over v8f64 (with VEX_W).  Opcodes 0xA6-0xAE are the standard FMA3 213
// packed encodings.
let ExeDomain = SSEPackedSingle in {
  defm VFMADD213PSZ    : avx512_fma3p_rm<0xA8, "vfmadd213ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmadd, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
  defm VFMSUB213PSZ    : avx512_fma3p_rm<0xAA, "vfmsub213ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmsub, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
  defm VFMADDSUB213PSZ : avx512_fma3p_rm<0xA6, "vfmaddsub213ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmaddsub, v16f32>,
                                    EVEX_V512, EVEX_CD8<32, CD8VF>;
  defm VFMSUBADD213PSZ : avx512_fma3p_rm<0xA7, "vfmsubadd213ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmsubadd, v16f32>,
                                    EVEX_V512, EVEX_CD8<32, CD8VF>;
  defm VFNMADD213PSZ   : avx512_fma3p_rm<0xAC, "vfnmadd213ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fnmadd, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
  defm VFNMSUB213PSZ   : avx512_fma3p_rm<0xAE, "vfnmsub213ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fnmsub, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
}
let ExeDomain = SSEPackedDouble in {
  defm VFMADD213PDZ    : avx512_fma3p_rm<0xA8, "vfmadd213pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmadd, v8f64>, EVEX_V512,
                                    VEX_W, EVEX_CD8<64, CD8VF>;
  defm VFMSUB213PDZ    : avx512_fma3p_rm<0xAA, "vfmsub213pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmsub, v8f64>, EVEX_V512, VEX_W,
                                    EVEX_CD8<64, CD8VF>;
  defm VFMADDSUB213PDZ : avx512_fma3p_rm<0xA6, "vfmaddsub213pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
                                    EVEX_CD8<64, CD8VF>;
  defm VFMSUBADD213PDZ : avx512_fma3p_rm<0xA7, "vfmsubadd213pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
                                    EVEX_CD8<64, CD8VF>;
  defm VFNMADD213PDZ : avx512_fma3p_rm<0xAC, "vfnmadd213pd", VR512, f512mem,
                                  memopv8f64, f64mem, loadf64, "{1to8}",
                                  X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
                                  EVEX_CD8<64, CD8VF>;
  defm VFNMSUB213PDZ : avx512_fma3p_rm<0xAE, "vfnmsub213pd", VR512, f512mem,
                                  memopv8f64, f64mem, loadf64, "{1to8}",
                                  X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
                                  EVEX_CD8<64, CD8VF>;
}
2661
// Packed FMA3, 132 operand order — memory/broadcast forms only (the
// register form of 132 is redundant with the 213/231 register forms).
// Note the ins order: ($src1, $src3, mem:$src2) with $src1 tied to $dst.
let Constraints = "$src1 = $dst" in {
multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr,
            RegisterClass RC, X86MemOperand x86memop,
            PatFrag mem_frag, X86MemOperand x86scalar_mop, PatFrag scalar_mfrag,
            string BrdcstStr, SDNode OpNode, ValueType OpVT> {
  let mayLoad = 1 in
  def m: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
          (ins RC:$src1, RC:$src3, x86memop:$src2),
          !strconcat(OpcodeStr, " \t{$src2, $src3, $dst|$dst, $src3, $src2}"),
          [(set RC:$dst, (OpVT (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3)))]>;
   def mb: AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, RC:$src3, x86scalar_mop:$src2),
           !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
            ", $src3, $dst|$dst, $src3, ${src2}", BrdcstStr, "}"),
           [(set RC:$dst, (OpNode RC:$src1, 
           (OpVT (X86VBroadcast (scalar_mfrag addr:$src2))), RC:$src3))]>, EVEX_B;
}
} // Constraints = "$src1 = $dst"
2680
2681
// 132-form packed FMA instantiations (memory/broadcast only); opcodes
// 0x96-0x9E are the standard FMA3 132 packed encodings.
let ExeDomain = SSEPackedSingle in {
  defm VFMADD132PSZ    : avx512_fma3p_m132<0x98, "vfmadd132ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmadd, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
  defm VFMSUB132PSZ    : avx512_fma3p_m132<0x9A, "vfmsub132ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmsub, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
  defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmaddsub, v16f32>,
                                    EVEX_V512, EVEX_CD8<32, CD8VF>;
  defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fmsubadd, v16f32>,
                                    EVEX_V512, EVEX_CD8<32, CD8VF>;
  defm VFNMADD132PSZ   : avx512_fma3p_m132<0x9C, "vfnmadd132ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fnmadd, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
  defm VFNMSUB132PSZ   : avx512_fma3p_m132<0x9E, "vfnmsub132ps", VR512, f512mem,
                                    memopv16f32, f32mem, loadf32, "{1to16}",
                                    X86Fnmsub, v16f32>, EVEX_V512,
                                    EVEX_CD8<32, CD8VF>;
}
let ExeDomain = SSEPackedDouble in {
  defm VFMADD132PDZ    : avx512_fma3p_m132<0x98, "vfmadd132pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmadd, v8f64>, EVEX_V512,
                                    VEX_W, EVEX_CD8<64, CD8VF>;
  defm VFMSUB132PDZ    : avx512_fma3p_m132<0x9A, "vfmsub132pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmsub, v8f64>, EVEX_V512, VEX_W,
                                    EVEX_CD8<64, CD8VF>;
  defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmaddsub, v8f64>, EVEX_V512, VEX_W,
                                    EVEX_CD8<64, CD8VF>;
  defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", VR512, f512mem,
                                    memopv8f64, f64mem, loadf64, "{1to8}",
                                    X86Fmsubadd, v8f64>, EVEX_V512, VEX_W,
                                    EVEX_CD8<64, CD8VF>;
  defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", VR512, f512mem,
                                  memopv8f64, f64mem, loadf64, "{1to8}",
                                  X86Fnmadd, v8f64>, EVEX_V512, VEX_W,
                                  EVEX_CD8<64, CD8VF>;
  defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", VR512, f512mem,
                                  memopv8f64, f64mem, loadf64, "{1to8}",
                                  X86Fnmsub, v8f64>, EVEX_V512, VEX_W,
                                  EVEX_CD8<64, CD8VF>;
}
2734
2735// Scalar FMA
// Scalar FMA3 (213 form).  $src1 is tied to $dst; the register form is
// commutable in operands 2/3.  RC/OpVT are the scalar FP class and type
// (FR32X/f32 or FR64X/f64); x86memop is the matching scalar memory operand
// and mem_frag the matching scalar load.  The `memop` parameter is accepted
// for interface compatibility but unused here.
let Constraints = "$src1 = $dst" in {
multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, 
                 RegisterClass RC, ValueType OpVT, 
                 X86MemOperand x86memop, Operand memop, 
                 PatFrag mem_frag> {
  let isCommutable = 1 in
  def r     : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                              " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst,
                     (OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
  let mayLoad = 1 in
  // Memory form: use the scalar memory operand supplied by the
  // instantiation (f32mem/f64mem).  Previously this hard-coded f128mem,
  // which advertised a 128-bit memory access for what is a scalar load
  // (mem_frag is loadf32/loadf64), giving wrong operand-size information
  // to the assembler/disassembler and memory-operand consumers.
  def m     : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, x86memop:$src3),
                   !strconcat(OpcodeStr,
                              " \t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   [(set RC:$dst,
                     (OpVT (OpNode RC:$src2, RC:$src1,
                            (mem_frag addr:$src3))))]>;
}

} // Constraints = "$src1 = $dst"
2759
// Scalar FMA instantiations (213-form mnemonics, opcodes 0xA9-0xAF):
// ss variants use FR32X/f32 with f32mem/loadf32; sd variants use
// FR64X/f64 with f64mem/loadf64 and VEX_W.
defm VFMADDSSZ  : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
                      f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFMADDSDZ  : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
                      f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VFMSUBSSZ  : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
                      f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFMSUBSDZ  : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
                      f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VFNMADDSSZ  : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
                      f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFNMADDSDZ  : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
                      f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VFNMSUBSSZ  : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
                      f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
defm VFNMSUBSDZ  : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
                      f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
2776
2777//===----------------------------------------------------------------------===//
2778// AVX-512  Scalar convert from sign integer to float/double
2779//===----------------------------------------------------------------------===//
2780
// Scalar integer -> FP conversion skeleton (CVTSI2SS/SD and the unsigned
// AVX-512 variants).  Both forms carry empty pattern lists — selection is
// done by separate Pat<> records — and are marked side-effect free.
multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
let hasSideEffects = 0 in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              EVEX_4V;
  let mayLoad = 1 in
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm," \t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
              EVEX_4V;
} // hasSideEffects = 0
}
// Signed and unsigned int -> float/double conversions.  Each defm gets a
// pair of Pat<> records selecting (s|u)int_to_fp from a register or a load;
// the first operand is an IMPLICIT_DEF because the instruction's $src1 only
// supplies the untouched upper bits of the destination.
let Predicates = [HasAVX512] in {
defm VCVTSI2SSZ   : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
                                  XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
                                  XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
defm VCVTSI2SDZ   : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
                                  XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
                                  XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;

// Signed conversions from memory.
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
          (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
          (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
          (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
          (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;

// Signed conversions from registers.
def : Pat<(f32 (sint_to_fp GR32:$src)),
          (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f32 (sint_to_fp GR64:$src)),
          (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
def : Pat<(f64 (sint_to_fp GR32:$src)),
          (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f64 (sint_to_fp GR64:$src)),
          (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;

// Unsigned variants (VCVTUSI2SS/SD, opcode 0x7B) — new in AVX-512.
defm VCVTUSI2SSZ   : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
                                  XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
                                  XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
defm VCVTUSI2SDZ   : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
                                  XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
                                  XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;

// Unsigned conversions from memory.
def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
          (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
          (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
          (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
          (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;

// Unsigned conversions from registers.
def : Pat<(f32 (uint_to_fp GR32:$src)),
          (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f32 (uint_to_fp GR64:$src)),
          (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
def : Pat<(f64 (uint_to_fp GR32:$src)),
          (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f64 (uint_to_fp GR64:$src)),
          (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}
2849
2850//===----------------------------------------------------------------------===//
2851// AVX-512  Scalar convert from float/double to integer
2852//===----------------------------------------------------------------------===//
// Scalar FP -> integer conversion via intrinsic.  The register form is
// selected directly from the intrinsic; the memory form has no pattern
// (mem_cpat is accepted for interface symmetry but unused here).
multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
                          string asm> {
let hasSideEffects = 0 in {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
              Requires<[HasAVX512]>;
  let mayLoad = 1 in
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
              Requires<[HasAVX512]>;
} // hasSideEffects = 0
}
let Predicates = [HasAVX512] in {
// Convert float/double to signed/unsigned int 32/64
// Signed forms use the legacy 0x2D opcode; the unsigned cvt*2usi forms
// (0x79) are AVX-512 additions.  64-bit destinations carry VEX_W.
defm VCVTSS2SIZ:    avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
                                   ssmem, sse_load_f32, "cvtss2si">,
                                   XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2SI64Z:  avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
                                   ssmem, sse_load_f32, "cvtss2si">,
                                   XS, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USIZ:   avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
                                   ssmem, sse_load_f32, "cvtss2usi">,
                                   XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
                                   int_x86_avx512_cvtss2usi64, ssmem,
                                   sse_load_f32, "cvtss2usi">, XS, VEX_W,
                                   EVEX_CD8<32, CD8VT1>;
defm VCVTSD2SIZ:    avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
                                   sdmem, sse_load_f64, "cvtsd2si">,
                                   XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2SI64Z:  avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
                                   sdmem, sse_load_f64, "cvtsd2si">,
                                   XD, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USIZ:   avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
                                   sdmem, sse_load_f64, "cvtsd2usi">,
                                   XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
                                   int_x86_avx512_cvtsd2usi64, sdmem,
                                   sse_load_f64, "cvtsd2usi">, XD, VEX_W,
                                   EVEX_CD8<64, CD8VT1>;
2895
// Intrinsic (codegen-only) forms of scalar int -> FP conversions, built from
// the shared sse12_cvt_sint_3addr multiclass. Signed and unsigned variants
// share opcode 0x2A; the mnemonic string distinguishes them.
let isCodeGenOnly = 1 in {
  defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V;
  defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
  defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V;
  defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;

  // Unsigned-source variants (AVX-512 intrinsics).
  defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V;
  defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
            SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
  defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
            int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V;
  defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
            int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
            SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
} // isCodeGenOnly = 1
2923
// Convert float/double to signed/unsigned int 32/64 with truncation
// Intrinsic (codegen-only) truncating conversions: signed forms use opcode
// 0x2C, unsigned forms use 0x78.
let isCodeGenOnly = 1 in {
  defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
                                     ssmem, sse_load_f32, "cvttss2si">,
                                     XS, EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
                                     int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                     "cvttss2si">, XS, VEX_W,
                                     EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
                                     sdmem, sse_load_f64, "cvttsd2si">, XD,
                                     EVEX_CD8<64, CD8VT1>;
  defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
                                     int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                     "cvttsd2si">, XD, VEX_W,
                                     EVEX_CD8<64, CD8VT1>;
  defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
                                     int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
                                     "cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
                                     int_x86_avx512_cvttss2usi64, ssmem,
                                     sse_load_f32, "cvttss2usi">, XS, VEX_W,
                                     EVEX_CD8<32, CD8VT1>;
  defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
                                     int_x86_avx512_cvttsd2usi,
                                     sdmem, sse_load_f64, "cvttsd2usi">, XD,
                                     EVEX_CD8<64, CD8VT1>;
  defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
                                     int_x86_avx512_cvttsd2usi64, sdmem,
                                     sse_load_f64, "cvttsd2usi">, XD, VEX_W,
                                     EVEX_CD8<64, CD8VT1>;
} // isCodeGenOnly = 1
2956
// Scalar FP -> integer conversion in SDNode form (fp_to_sint/fp_to_uint):
// source is a scalar FP register class (e.g. FR32X/FR64X), destination a GPR.
// Unlike avx512_cvt_s_int, the memory form here does have a load-folding
// pattern via ld_frag.
multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
}
2967
// Truncating scalar FP -> int conversions selected from the generic
// fp_to_sint (0x2C) / fp_to_uint (0x78) DAG nodes.
defm VCVTTSS2SIZ    : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
                                  loadf32, "cvttss2si">, XS,
                                  EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USIZ   : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
                                  loadf32, "cvttss2usi">, XS,
                                  EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2SI64Z  : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
                                  loadf32, "cvttss2si">, XS, VEX_W,
                                  EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
                                  loadf32, "cvttss2usi">, XS, VEX_W,
                                  EVEX_CD8<32, CD8VT1>;
defm VCVTTSD2SIZ    : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
                                  loadf64, "cvttsd2si">, XD,
                                  EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USIZ   : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
                                  loadf64, "cvttsd2usi">, XD,
                                  EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2SI64Z  : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
                                  loadf64, "cvttsd2si">, XD, VEX_W,
                                  EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
                                  loadf64, "cvttsd2usi">, XD, VEX_W,
                                  EVEX_CD8<64, CD8VT1>;
2992} // HasAVX512
2993//===----------------------------------------------------------------------===//
2994// AVX-512  Convert form float to double and back
2995//===----------------------------------------------------------------------===//
// Scalar single <-> double conversions (opcode 0x5A for both directions,
// distinguished by XS vs. XD prefix). No selection patterns here; selection
// is done by the Pat<> definitions that follow.
let hasSideEffects = 0 in {
def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
                    (ins FR32X:$src1, FR32X:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
let mayLoad = 1 in
def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
                    (ins FR32X:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
                    EVEX_CD8<32, CD8VT1>;

// Convert scalar double to scalar single
def VCVTSD2SSZrr  : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
                      (ins FR64X:$src1, FR64X:$src2),
                      "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      []>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
let mayLoad = 1 in
def VCVTSD2SSZrm  : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
                      (ins FR64X:$src1, f64mem:$src2),
                      "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      []>, EVEX_4V, VEX_LIG, VEX_W,
                      Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
}
3020
// Select fextend/fround through the scalar convert instructions above.
// For the register form the source doubles as the pass-through operand.
def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
      Requires<[HasAVX512]>;
def : Pat<(fextend (loadf32 addr:$src)),
    (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;

// extload folding: fold the load into the convert only when optimizing for
// size; otherwise load through VMOVSSZrm first and convert reg-reg.
def : Pat<(extloadf32 addr:$src),
    (VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
      Requires<[HasAVX512, OptForSize]>;

def : Pat<(extloadf32 addr:$src),
    (VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
    Requires<[HasAVX512, OptForSpeed]>;

def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
           Requires<[HasAVX512]>;
3035           Requires<[HasAVX512]>;
3036
// Packed FP/int conversion with an additional static-rounding (rrb) form
// taking an AVX512RC rounding-mode operand (EVEX.b set). The rrb form has
// no selection pattern; it is reached via intrinsic Pat<>s elsewhere.
multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC, 
               RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag, 
               X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
               Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
  // Static-rounding variant: extra $rc operand, EVEX_B + EVEX_RC encoding.
  def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
              !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
              [], d>, EVEX, EVEX_B, EVEX_RC;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}
3056
// Packed FP/int conversion without a rounding-control form; otherwise
// identical to avx512_vcvt_fp_with_rc (rr + load-folding rm).
multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
               RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
               X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
               Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}
3073
// Packed double <-> single conversions (512-bit).
defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
                                memopv8f64, f512mem, v8f32, v8f64,
                                SSEPackedSingle>, EVEX_V512, VEX_W, PD,
                                EVEX_CD8<64, CD8VF>;

defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
                                memopv4f64, f256mem, v8f64, v8f32,
                                SSEPackedDouble>, EVEX_V512, PS,
                                EVEX_CD8<32, CD8VH>;
// NOTE(review): this pattern has no Predicates guard here and is repeated
// under "let Predicates = [HasAVX512]" further below — confirm the
// unguarded copy is intentional.
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
            (VCVTPS2PDZrm addr:$src)>;
            
// Map the masked cvtpd2ps intrinsic (all-ones mask, zero pass-through) onto
// the plain instruction; FROUND_CURRENT selects the non-rounding rr form,
// any other immediate selects the static-rounding rrb form.
def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
                   (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
          (VCVTPD2PSZrr VR512:$src)>;

def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
                   (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
          (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;
3093
3094//===----------------------------------------------------------------------===//
3095// AVX-512  Vector convert from sign integer to float/double
3096//===----------------------------------------------------------------------===//
3097
// Packed signed int -> FP conversions (512-bit).
defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
                                memopv8i64, i512mem, v16f32, v16i32,
                                SSEPackedSingle>, EVEX_V512, PS,
                                EVEX_CD8<32, CD8VF>;

defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
                                memopv4i64, i256mem, v8f64, v8i32,
                                SSEPackedDouble>, EVEX_V512, XS,
                                EVEX_CD8<32, CD8VH>;

// Packed truncating FP -> signed int conversions.
defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
                                 memopv16f32, f512mem, v16i32, v16f32,
                                 SSEPackedSingle>, EVEX_V512, XS,
                                 EVEX_CD8<32, CD8VF>;

defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
                                 memopv8f64, f512mem, v8i32, v8f64, 
                                 SSEPackedDouble>, EVEX_V512, PD, VEX_W,
                                 EVEX_CD8<64, CD8VF>;

// Packed truncating FP -> unsigned int conversions (opcode 0x78).
defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
                                 memopv16f32, f512mem, v16i32, v16f32,
                                 SSEPackedSingle>, EVEX_V512, PS,
                                 EVEX_CD8<32, CD8VF>;

// cvttps2udq (src, 0, mask-all-ones, sae-current)
def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
                   (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
          (VCVTTPS2UDQZrr VR512:$src)>;

defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
                                 memopv8f64, f512mem, v8i32, v8f64,
                                 SSEPackedDouble>, EVEX_V512, PS, VEX_W,
                                 EVEX_CD8<64, CD8VF>;
                                 
// cvttpd2udq (src, 0, mask-all-ones, sae-current)
def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
                   (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
          (VCVTTPD2UDQZrr VR512:$src)>;

// Packed unsigned int -> FP conversions (opcode 0x7A).
defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
                                 memopv4i64, f256mem, v8f64, v8i32,
                                 SSEPackedDouble>, EVEX_V512, XS,
                                 EVEX_CD8<32, CD8VH>;
                                 
defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
                                 memopv16i32, f512mem, v16f32, v16i32,
                                 SSEPackedSingle>, EVEX_V512, XD,
                                 EVEX_CD8<32, CD8VF>;
3147
// 256/128-bit fp_to_uint and uint_to_fp have no narrow AVX-512 encodings
// here, so widen the operand into a ZMM register (upper lanes undefined via
// SUBREG_TO_REG), run the 512-bit instruction, and extract the low subreg.
def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
          (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr 
           (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
                                 
def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
          (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
           (v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;

def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
          (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
           (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
           
def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
          (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
           (v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;

// Masked int->FP intrinsics with all-ones mask and zero pass-through map to
// the unmasked instructions (rrb variant when a rounding mode is supplied).
def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
                   (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
          (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
                   (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
          (VCVTDQ2PDZrr VR256X:$src)>;
def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
                   (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
          (VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
                   (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
          (VCVTUDQ2PDZrr VR256X:$src)>;
3176
// Packed FP -> int conversion with rounding-control form, but with NO
// selection patterns at all (rr/rrb/rm are all pattern-less); selection is
// done exclusively through the intrinsic Pat<>s below.
multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
               RegisterClass DstRC, PatFrag mem_frag,
               X86MemOperand x86memop, Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [], d>, EVEX;
  // Static-rounding variant with an AVX512RC operand.
  def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
              !strconcat(asm," \t{$rc, $src, $dst|$dst, $src, $rc}"),
              [], d>, EVEX, EVEX_B, EVEX_RC;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm," \t{$src, $dst|$dst, $src}"),
              [], d>, EVEX;
} // hasSideEffects = 0
}
3193
// Rounding (non-truncating) packed FP -> signed int conversions.
defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
                                 memopv16f32, f512mem, SSEPackedSingle>, PD,
                                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
                                 memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
                                 EVEX_V512, EVEX_CD8<64, CD8VF>;

// All-ones-mask intrinsic forms select the static-rounding rrb variant.
def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
                    (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
           (VCVTPS2DQZrrb VR512:$src, imm:$rc)>;

def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
                    (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
           (VCVTPD2DQZrrb VR512:$src, imm:$rc)>;

// Rounding packed FP -> unsigned int conversions (opcode 0x79).
defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
                                 memopv16f32, f512mem, SSEPackedSingle>,
                                 PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
                                 memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
                                 PS, EVEX_V512, EVEX_CD8<64, CD8VF>;

def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
                    (v16i32 immAllZerosV), (i16 -1), imm:$rc)),
           (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;

def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
                    (v8i32 immAllZerosV), (i8 -1), imm:$rc)),
           (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
3223
// Load-folding patterns for the packed single<->double conversions, guarded
// by HasAVX512. NOTE(review): the extloadv8f32 pattern duplicates an
// unpredicated copy defined just after VCVTPS2PDZ above — confirm whether
// both are needed.
let Predicates = [HasAVX512] in {
  def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
            (VCVTPD2PSZrm addr:$src)>;
  def : Pat<(v8f64 (extloadv8f32 addr:$src)),
            (VCVTPS2PDZrm addr:$src)>;
}
3230
3231//===----------------------------------------------------------------------===//
3232// Half precision conversion instructions
3233//===----------------------------------------------------------------------===//
// Half -> single precision conversion (vcvtph2ps, opcode 0x13). Both forms
// are pattern-less; selection happens via the intrinsic Pat<>s below.
multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
                             X86MemOperand x86memop> {
  def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             []>, EVEX;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
}
3243
// Single -> half precision conversion (vcvtps2ph, opcode 0x1D) with an
// immediate rounding-control operand ($src2). The mr form stores directly
// to memory; both forms are pattern-less.
multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
                             X86MemOperand x86memop> {
  def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
               (ins srcRC:$src1, i32i8imm:$src2),
               "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}",
               []>, EVEX;
  let hasSideEffects = 0, mayStore = 1 in
  def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
               "vcvtps2ph \t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
}
3255
// 512-bit half-precision conversion instantiations and their intrinsic
// selection patterns (all-ones mask, zero pass-through -> plain instruction).
defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
                                    EVEX_CD8<32, CD8VH>;
defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
                                    EVEX_CD8<32, CD8VH>;

def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
           imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
           (VCVTPS2PHZrr VR512:$src, imm:$rc)>;

def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
           (bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
           (VCVTPH2PSZrr VR256X:$src)>;
3268
// Scalar ordered/unordered compares that set EFLAGS (ucomiss/ucomisd at
// 0x2E, comiss/comisd at 0x2F), built from the shared sse12_ord_cmp
// multiclass. The vector-typed variants exist for the intrinsic forms.
let Defs = [EFLAGS], Predicates = [HasAVX512] in {
  defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
                                 "ucomiss">, PS, EVEX, VEX_LIG,
                                 EVEX_CD8<32, CD8VT1>;
  defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd">, PD, EVEX,
                                  VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
  // Assembly-only comis forms: patterns suppressed via an empty Pattern.
  let Pattern = []<dag> in {
    defm VCOMISSZ  : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
                                   "comiss">, PS, EVEX, VEX_LIG,
                                   EVEX_CD8<32, CD8VT1>;
    defm VCOMISDZ  : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
                                   "comisd">, PD, EVEX,
                                    VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
  }
  // Intrinsic (codegen-only) variants selected from X86ucomi/X86comi nodes.
  let isCodeGenOnly = 1 in {
    defm Int_VUCOMISSZ  : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
                              load, "ucomiss">, PS, EVEX, VEX_LIG,
                              EVEX_CD8<32, CD8VT1>;
    defm Int_VUCOMISDZ  : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
                              load, "ucomisd">, PD, EVEX,
                              VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;

    defm Int_VCOMISSZ  : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
                              load, "comiss">, PS, EVEX, VEX_LIG,
                              EVEX_CD8<32, CD8VT1>;
    defm Int_VCOMISDZ  : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
                              load, "comisd">, PD, EVEX,
                              VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
  }
}
3300  
3301/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
// Scalar 14-bit-precision reciprocal/rsqrt estimate. Pattern-less rr/rm
// forms; selection is via the COPY_TO_REGCLASS intrinsic patterns below.
multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            X86MemOperand x86memop> {
  let hasSideEffects = 0 in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2),
               !strconcat(OpcodeStr,
               " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop:$src2),
               !strconcat(OpcodeStr,
               " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
  }
}
}
3317
// Scalar estimate instantiations: rcp14 at 0x4D, rsqrt14 at 0x4F.
defm VRCP14SS   : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
                  EVEX_CD8<32, CD8VT1>;
defm VRCP14SD   : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
                  VEX_W, EVEX_CD8<64, CD8VT1>;
defm VRSQRT14SS   : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
                  EVEX_CD8<32, CD8VT1>;
defm VRSQRT14SD   : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
                  VEX_W, EVEX_CD8<64, CD8VT1>;

// The intrinsics operate on v4f32/v2f64 vectors while the instructions take
// scalar register classes, so each operand is bridged with COPY_TO_REGCLASS
// (only the all-ones-mask, zero-pass-through forms are matched).
def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
              (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;

def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
              (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;

def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
              (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;

def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
              (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
           (COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3346
3347/// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
/// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
// Packed 14-bit-precision estimate: selected directly from the X86frcp /
// X86frsqrt DAG nodes (unlike the scalar forms, these carry patterns).
multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         RegisterClass RC, X86MemOperand x86memop,
                         PatFrag mem_frag, ValueType OpVt> {
  def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                        !strconcat(OpcodeStr,
                                   " \t{$src, $dst|$dst, $src}"),
                        [(set RC:$dst, (OpVt (OpNode RC:$src)))]>,
                        EVEX;
  def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                        !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                        [(set RC:$dst, (OpVt (OpNode (mem_frag addr:$src))))]>,
                        EVEX;
}
// Packed estimate instantiations: rsqrt14 at 0x4E, rcp14 at 0x4C.
defm VRSQRT14PSZ : avx512_fp14_p<0x4E, "vrsqrt14ps", X86frsqrt, VR512, f512mem,
                        memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VRSQRT14PDZ : avx512_fp14_p<0x4E, "vrsqrt14pd", X86frsqrt, VR512, f512mem,
                        memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
defm VRCP14PSZ : avx512_fp14_p<0x4C, "vrcp14ps", X86frcp, VR512, f512mem,
                        memopv16f32, v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VRCP14PDZ : avx512_fp14_p<0x4C, "vrcp14pd", X86frcp, VR512, f512mem,
                        memopv8f64, v8f64>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// All-ones-mask, zero-pass-through intrinsic forms map to the plain
// register-form instructions.
def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
              (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
           (VRSQRT14PSZr VR512:$src)>;
def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
              (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
           (VRSQRT14PDZr VR512:$src)>;

def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
              (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
           (VRCP14PSZr VR512:$src)>;
def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
              (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
           (VRCP14PDZr VR512:$src)>;
3383
3384/// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
/// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
// Scalar 28-bit-precision estimate (ERI feature). Adds an rrb form with
// {sae} (suppress-all-exceptions, EVEX_B) next to the plain rr/rm forms;
// all are pattern-less and require HasERI.
multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                            X86MemOperand x86memop> {
  let hasSideEffects = 0, Predicates = [HasERI] in {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2),
               !strconcat(OpcodeStr,
               " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
  def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2),
               !strconcat(OpcodeStr,
               " \t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
               []>, EVEX_4V, EVEX_B;
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop:$src2),
               !strconcat(OpcodeStr,
               " \t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
  }
}
}
3405
// Scalar 28-bit estimate instantiations: rcp28 at 0xCB, rsqrt28 at 0xCD.
defm VRCP28SS   : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
                  EVEX_CD8<32, CD8VT1>;
defm VRCP28SD   : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
                  VEX_W, EVEX_CD8<64, CD8VT1>;
defm VRSQRT28SS   : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
                  EVEX_CD8<32, CD8VT1>;
defm VRSQRT28SD   : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
                  VEX_W, EVEX_CD8<64, CD8VT1>;

// FROUND_NO_EXC intrinsic forms select the {sae} (rrb) variants, bridging
// the vector-typed intrinsics to scalar register classes via
// COPY_TO_REGCLASS (all-ones mask, zero pass-through only).
def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
              (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
                   FROUND_NO_EXC)),
           (COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;

def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
              (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
                   FROUND_NO_EXC)),
           (COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;

def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
              (v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
                   FROUND_NO_EXC)),
           (COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;

def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
              (v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
                   FROUND_NO_EXC)),
           (COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
                       (COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
3438
/// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
// Packed ERI approximation instructions.  No ISel patterns are attached
// here; selection is done by the Pat<>s following the instantiations.
multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr,
                         RegisterClass RC, X86MemOperand x86memop> {
  let hasSideEffects = 0, Predicates = [HasERI] in {
  // Register form.
  def r : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                        !strconcat(OpcodeStr,
                                   " \t{$src, $dst|$dst, $src}"),
                        []>, EVEX;
  // Register form with {sae} (suppress-all-exceptions, EVEX.b set).
  def rb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                        !strconcat(OpcodeStr,
                                   " \t{{sae}, $src, $dst|$dst, $src, {sae}}"),
                        []>, EVEX, EVEX_B;
  // Memory form.
  def m : AVX5128I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                        !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
                        []>, EVEX;
  }
}
// 512-bit ERI packed approximations: vrsqrt28ps/pd and vrcp28ps/pd.
// The pd forms carry VEX_W and a 64-bit compressed-displacement scale.
defm VRSQRT28PSZ : avx512_fp28_p<0xCC, "vrsqrt28ps", VR512, f512mem>,
                        EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VRSQRT28PDZ : avx512_fp28_p<0xCC, "vrsqrt28pd", VR512, f512mem>,
                        VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
defm VRCP28PSZ : avx512_fp28_p<0xCA, "vrcp28ps", VR512, f512mem>,
                        EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VRCP28PDZ : avx512_fp28_p<0xCA, "vrcp28pd", VR512, f512mem>,
                        VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
3464
// Select the packed ERI intrinsics in their "effectively unmasked" form
// (zero pass-through, all-ones mask, FROUND_NO_EXC) via the {sae}
// register variants (*Zrb).
def : Pat <(v16f32 (int_x86_avx512_rsqrt28_ps (v16f32 VR512:$src),
              (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
           (VRSQRT28PSZrb VR512:$src)>;
def : Pat <(v8f64 (int_x86_avx512_rsqrt28_pd (v8f64 VR512:$src),
              (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
           (VRSQRT28PDZrb VR512:$src)>;

def : Pat <(v16f32 (int_x86_avx512_rcp28_ps (v16f32 VR512:$src),
              (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_NO_EXC)),
           (VRCP28PSZrb VR512:$src)>;
def : Pat <(v8f64 (int_x86_avx512_rcp28_pd (v8f64 VR512:$src),
              (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_NO_EXC)),
           (VRCP28PDZrb VR512:$src)>;
3478
// 512-bit packed SQRT (vsqrtps/vsqrtpd).  The plain forms pattern-match
// the generic OpNode (fsqrt); the isCodeGenOnly forms match the
// corresponding x86 intrinsics.
// Fixes in this revision:
//  - PDZrr/PDZrm now carry VEX_W, matching the PDZr_Int/PDZm_Int forms
//    below (vsqrtpd is an EVEX.W1 instruction; without W it mis-encodes).
//  - PDZrm folds a v8f64 load via memopv8f64.  The old pattern used
//    (bitconvert (memopv16f32 ...)), which only matched a v16f32 load
//    explicitly bitcast to v8f64, so a natural v8f64 load never folded.
multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              Intrinsic V16F32Int, Intrinsic V8F64Int,
                              OpndItins itins_s, OpndItins itins_d> {
  def PSZrr :AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
             !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
             [(set VR512:$dst, (v16f32 (OpNode VR512:$src)))], itins_s.rr>,
             EVEX, EVEX_V512;

  let mayLoad = 1 in
  def PSZrm : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
              !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
              [(set VR512:$dst,
                (OpNode (v16f32 (bitconvert (memopv16f32 addr:$src)))))],
              itins_s.rm>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>;

  def PDZrr : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
              !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
              [(set VR512:$dst, (v8f64 (OpNode VR512:$src)))], itins_d.rr>,
              EVEX, EVEX_V512, VEX_W;

  let mayLoad = 1 in
    def PDZrm : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR512:$dst, (OpNode
                  (v8f64 (memopv8f64 addr:$src))))],
                itins_d.rm>, EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

let isCodeGenOnly = 1 in {
  // Intrinsic forms (int_x86_avx512_sqrt_ps_512 / _pd_512).
  def PSZr_Int : AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
                           !strconcat(OpcodeStr,
                                      "ps\t{$src, $dst|$dst, $src}"),
                           [(set VR512:$dst, (V16F32Int VR512:$src))]>,
                           EVEX, EVEX_V512;
  def PSZm_Int : AVX512PSI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
                          !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                          [(set VR512:$dst,
                           (V16F32Int (memopv16f32 addr:$src)))]>, EVEX,
                          EVEX_V512, EVEX_CD8<32, CD8VF>;
  def PDZr_Int : AVX512PDI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
                           !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                           [(set VR512:$dst, (V8F64Int VR512:$src))]>,
                           EVEX, EVEX_V512, VEX_W;
  def PDZm_Int : AVX512PDI<opc, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src),
                         !strconcat(OpcodeStr,
                         "pd\t{$src, $dst|$dst, $src}"),
                         [(set VR512:$dst, (V8F64Int (memopv8f64 addr:$src)))]>,
                         EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
} // isCodeGenOnly = 1
}
3528
// Scalar SQRT (vsqrtss/vsqrtsd).  The plain FR32X/FR64X forms carry no
// patterns (selected by the Pat<>s below); the isCodeGenOnly *_Int forms
// match the scalar intrinsics on VR128X.
// Fixes in this revision: SDZr_Int used itins_s.rr (copy-paste from the
// ss block) -- now itins_d.rr; SDZm_Int gained the missing itins_d.rm to
// mirror SSZm_Int.
multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
                          Intrinsic F32Int, Intrinsic F64Int,
                          OpndItins itins_s, OpndItins itins_d> {
  def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
               (ins FR32X:$src1, FR32X:$src2),
               !strconcat(OpcodeStr,
                          "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [], itins_s.rr>, XS, EVEX_4V;
  let isCodeGenOnly = 1 in
  def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
               (ins VR128X:$src1, VR128X:$src2),
               !strconcat(OpcodeStr,
                "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set VR128X:$dst,
                 (F32Int VR128X:$src1, VR128X:$src2))],
               itins_s.rr>, XS, EVEX_4V;
  let mayLoad = 1 in {
  def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
               (ins FR32X:$src1, f32mem:$src2),
               !strconcat(OpcodeStr,
                          "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
  let isCodeGenOnly = 1 in
  def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
                   (ins VR128X:$src1, ssmem:$src2),
                   !strconcat(OpcodeStr,
                 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set VR128X:$dst,
                     (F32Int VR128X:$src1, sse_load_f32:$src2))],
                   itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
  }
  def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
               (ins FR64X:$src1, FR64X:$src2),
               !strconcat(OpcodeStr,
                          "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
                      XD, EVEX_4V, VEX_W;
  let isCodeGenOnly = 1 in
  def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
               (ins VR128X:$src1, VR128X:$src2),
               !strconcat(OpcodeStr,
                "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set VR128X:$dst,
                 (F64Int VR128X:$src1, VR128X:$src2))],
               itins_d.rr>, XD, EVEX_4V, VEX_W;
  let mayLoad = 1 in {
  def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
               (ins FR64X:$src1, f64mem:$src2),
               !strconcat(OpcodeStr,
                  "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
               XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
  let isCodeGenOnly = 1 in
  def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
                  (ins VR128X:$src1, sdmem:$src2),
                   !strconcat(OpcodeStr,
                  "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR128X:$dst,
                    (F64Int VR128X:$src1, sse_load_f64:$src2))],
                  itins_d.rm>,
                  XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
  }
}
3589
3590
// Instantiate the scalar (vsqrtss/sd) and packed (vsqrtps/pd) 512-bit
// SQRT forms.  NOTE(review): the scalar instantiation passes "sqrt"
// while the packed one passes "vsqrt" -- presumably the SI base class
// prepends the "v" itself; confirm against its definition.
defm VSQRT  : avx512_sqrt_scalar<0x51, "sqrt", 
                int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd, 
                SSE_SQRTSS, SSE_SQRTSD>,
              avx512_sqrt_packed<0x51, "vsqrt", fsqrt,
                int_x86_avx512_sqrt_ps_512, int_x86_avx512_sqrt_pd_512,
                SSE_SQRTPS, SSE_SQRTPD>;
3597
// ISel patterns for scalar sqrt, rsqrt and rcp under AVX-512.  The
// first operand of each instruction is the merge source; IMPLICIT_DEF is
// used because the upper bits are don't-care here.  Memory forms are
// only selected under OptForSize.
let Predicates = [HasAVX512] in {
  def : Pat<(f32 (fsqrt FR32X:$src)),
            (VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
  def : Pat<(f32 (fsqrt (load addr:$src))),
            (VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;
  def : Pat<(f64 (fsqrt FR64X:$src)),
            (VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
  def : Pat<(f64 (fsqrt (load addr:$src))),
            (VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;

  def : Pat<(f32 (X86frsqrt FR32X:$src)),
            (VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
  def : Pat<(f32 (X86frsqrt (load addr:$src))),
            (VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;

  def : Pat<(f32 (X86frcp FR32X:$src)),
            (VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
  def : Pat<(f32 (X86frcp (load addr:$src))),
            (VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[OptForSize]>;

  // SSE scalar sqrt intrinsics on VR128X.  NOTE(review): the register
  // cases copy through FR32/FR64 rather than FR32X/FR64X -- verify this
  // is intentional given the operands live in AVX-512 register classes.
  def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
            (COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
                                        (COPY_TO_REGCLASS VR128X:$src, FR32)),
                              VR128X)>;
  def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
            (VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
            (COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
                                        (COPY_TO_REGCLASS VR128X:$src, FR64)),
                              VR128X)>;
  def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
            (VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
}
3636
3637
// Packed FP ops taking a rounding-control immediate ($src2); the
// patterns match the V4F32Int / V2F64Int intrinsics directly for both
// register and memory sources.
multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int,
                            CD8VForm VForm> {
let ExeDomain = SSEPackedSingle in {
  // Intrinsic operation, reg.
  // Vector intrinsic operation, reg
  def PSr : AVX512AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;

  // Vector intrinsic operation, mem
  def PSm : AVX512AIi8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
                    EVEX_CD8<32, VForm>;
} // ExeDomain = SSEPackedSingle

let ExeDomain = SSEPackedDouble in {
  // Vector intrinsic operation, reg
  def PDr : AVX512AIi8<opcpd, MRMSrcReg,
                     (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                     !strconcat(OpcodeStr,
                     "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;

  // Vector intrinsic operation, mem
  def PDm : AVX512AIi8<opcpd, MRMSrcMem,
                     (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                     !strconcat(OpcodeStr,
                     "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
                     EVEX_CD8<64, VForm>;
} // ExeDomain = SSEPackedDouble
}
3680
// Scalar two-source FP ops with a rounding-control immediate ($src3).
// The plain SSr/SDr forms carry no patterns (assembly only); the *_Int
// and memory forms pattern-match the F32Int/F64Int intrinsics.
multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                            string OpcodeStr,
                            Intrinsic F32Int,
                            Intrinsic F64Int> {
let ExeDomain = GenericDomain in {
  // Operation, reg.
  let hasSideEffects = 0 in
  def SSr : AVX512AIi8<opcss, MRMSrcReg,
      (outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
      !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>;

  // Intrinsic operation, reg.
  let isCodeGenOnly = 1 in
  def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
        (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;

  // Intrinsic operation, mem.
  def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
                     (ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                   "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                     [(set VR128X:$dst, (F32Int VR128X:$src1, 
                                         sse_load_f32:$src2, imm:$src3))]>,
                     EVEX_CD8<32, CD8VT1>;

  // Operation, reg.
  let hasSideEffects = 0 in
  def SDr : AVX512AIi8<opcsd, MRMSrcReg,
        (outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, VEX_W;

  // Intrinsic operation, reg.
  let isCodeGenOnly = 1 in
  def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
        (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
        VEX_W;

  // Intrinsic operation, mem.
  def SDm : AVX512AIi8<opcsd, MRMSrcMem,
        (outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set VR128X:$dst,
              (F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
        VEX_W, EVEX_CD8<64, CD8VT1>;
} // ExeDomain = GenericDomain
}
3738
// Packed round-to-scale (vrndscaleps/pd).  $src2 is the imm8 rounding
// control.  No patterns here; selection is via the Pat<>s following the
// instantiations.
multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag, Domain d> {
let ExeDomain = d in {
  // Intrinsic operation, reg.
  // Vector intrinsic operation, reg
  def r : AVX512AIi8<opc, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, EVEX;

  // Vector intrinsic operation, mem
  def m : AVX512AIi8<opc, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, EVEX;
} // ExeDomain
}
3759
3760
// 512-bit vrndscaleps/pd plus patterns that select the masked intrinsic
// when it is effectively unmasked: all-ones mask, pass-through equal to
// the source, and FROUND_CURRENT.
defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
                                memopv16f32, SSEPackedSingle>, EVEX_V512,
                                EVEX_CD8<32, CD8VF>;

def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
                   imm:$src2, (v16f32 VR512:$src1), (i16 -1),
                   FROUND_CURRENT)),
                   (VRNDSCALEPSZr VR512:$src1, imm:$src2)>;


defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
                                memopv8f64, SSEPackedDouble>, EVEX_V512,
                                VEX_W, EVEX_CD8<64, CD8VF>;

def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
                  imm:$src2, (v8f64 VR512:$src1), (i8 -1),
                  FROUND_CURRENT)),
                   (VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
3779
// Scalar round-to-scale (vrndscaless/sd).  $src3 is the imm8 rounding
// control.  Fix: the immediate was missing from both asm strings, so it
// could neither be printed nor parsed; it now appears first in AT&T
// order and last in Intel order, matching the other *AIi8 scalar ops.
multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
                     Operand x86memop, RegisterClass RC, Domain d> {
let ExeDomain = d in {
  def r : AVX512AIi8<opc, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
                    !strconcat(OpcodeStr,
                    " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                    []>, EVEX_4V;

  def m : AVX512AIi8<opc, MRMSrcMem,
                    (outs RC:$dst), (ins RC:$src1, x86memop:$src2,  i32i8imm:$src3),
                    !strconcat(OpcodeStr,
                    " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                    []>, EVEX_4V;
} // ExeDomain
}
3796
// Scalar round-to-scale instantiations (tuple-1 compressed disp8).
defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
                                SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;

defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
                                SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
3802
// Lower scalar rounding nodes through vrndscaless/sd.  Immediate values
// (per the mapping established here, same scheme as SSE4.1 round*):
//   0x1 = toward -inf (ffloor)     0x2 = toward +inf (fceil)
//   0x3 = toward zero (ftrunc)     0x4 = current mode (frint)
//   0xC = current mode, precision exceptions masked (fnearbyint)
// Fix: the first pattern now carries the (f32 ...) result-type wrapper
// like every sibling pattern, instead of relying on inference.
def : Pat<(f32 (ffloor FR32X:$src)),
          (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
def : Pat<(f64 (ffloor FR64X:$src)),
          (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
def : Pat<(f32 (fnearbyint FR32X:$src)),
          (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
def : Pat<(f64 (fnearbyint FR64X:$src)),
          (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
def : Pat<(f32 (fceil FR32X:$src)),
          (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
def : Pat<(f64 (fceil FR64X:$src)),
          (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
def : Pat<(f32 (frint FR32X:$src)),
          (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
def : Pat<(f64 (frint FR64X:$src)),
          (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
def : Pat<(f32 (ftrunc FR32X:$src)),
          (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
def : Pat<(f64 (ftrunc FR64X:$src)),
          (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
3823
// Packed rounding nodes via vrndscaleps/pd, using the same immediates
// as the scalar patterns above (0x1 floor, 0x2 ceil, 0x3 trunc,
// 0x4 rint, 0xC nearbyint).
def : Pat<(v16f32 (ffloor VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
def : Pat<(v16f32 (fnearbyint VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
def : Pat<(v16f32 (fceil VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
def : Pat<(v16f32 (frint VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
def : Pat<(v16f32 (ftrunc VR512:$src)),
          (VRNDSCALEPSZr VR512:$src, (i32 0x3))>;

def : Pat<(v8f64 (ffloor VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
def : Pat<(v8f64 (fnearbyint VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
def : Pat<(v8f64 (fceil VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
def : Pat<(v8f64 (frint VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
def : Pat<(v8f64 (ftrunc VR512:$src)),
          (VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
3845
3846//-------------------------------------------------
3847// Integer truncate and extend operations
3848//-------------------------------------------------
3849
// Down-converting integer truncations (vpmov*).  Note these use
// MRMDestReg/MRMDestMem: the wide VR512 source is the ModRM reg field
// and the narrow destination (register or memory) is the r/m field.
// Forms: rr = plain, rrk = merge-masked, rrkz = zero-masked,
// mr = store, mrk = masked store.
multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
                          RegisterClass dstRC, RegisterClass srcRC,
                          RegisterClass KRC, X86MemOperand x86memop> {
  def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
               (ins srcRC:$src),
               !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
               []>, EVEX;

  def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
               (ins KRC:$mask, srcRC:$src),
               !strconcat(OpcodeStr,
                 " \t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
               []>, EVEX, EVEX_K;

  def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
               (ins KRC:$mask, srcRC:$src),
               !strconcat(OpcodeStr,
                 " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
               []>, EVEX, EVEX_KZ;

  def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
               []>, EVEX;

  def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
               (ins x86memop:$dst, KRC:$mask, srcRC:$src),
               !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
               []>, EVEX, EVEX_K;

}
// Truncation instantiations.  Per the Intel mnemonics: vpmov* truncates,
// vpmovs* saturates signed, vpmovus* saturates unsigned.  The CD8 forms
// reflect the source-to-destination element ratio (VO = 1/8, VQ = 1/4,
// VH = 1/2).
defm VPMOVQB    : avx512_trunc_sat<0x32, "vpmovqb",   VR128X, VR512, VK8WM, 
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVSQB   : avx512_trunc_sat<0x22, "vpmovsqb",  VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVUSQB  : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
defm VPMOVQW    : avx512_trunc_sat<0x34, "vpmovqw",   VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
defm VPMOVSQW   : avx512_trunc_sat<0x24, "vpmovsqw",  VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
defm VPMOVUSQW  : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
defm VPMOVQD    : avx512_trunc_sat<0x35, "vpmovqd",   VR256X, VR512, VK8WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
defm VPMOVSQD   : avx512_trunc_sat<0x25, "vpmovsqd",  VR256X, VR512, VK8WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
defm VPMOVUSQD  : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
defm VPMOVDW    : avx512_trunc_sat<0x33, "vpmovdw",   VR256X, VR512, VK16WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
defm VPMOVSDW   : avx512_trunc_sat<0x23, "vpmovsdw",  VR256X, VR512, VK16WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
defm VPMOVUSDW  : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
                                 i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
defm VPMOVDB    : avx512_trunc_sat<0x31, "vpmovdb",   VR128X, VR512, VK16WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
defm VPMOVSDB   : avx512_trunc_sat<0x21, "vpmovsdb",  VR128X, VR512, VK16WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
defm VPMOVUSDB  : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
                                 i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
3910
// Plain truncations select the unmasked rr forms; the masked truncation
// node (X86vtruncm) selects the zero-masking rrkz forms.
def : Pat<(v16i8  (X86vtrunc (v8i64  VR512:$src))), (VPMOVQBrr  VR512:$src)>;
def : Pat<(v8i16  (X86vtrunc (v8i64  VR512:$src))), (VPMOVQWrr  VR512:$src)>;
def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr  VR512:$src)>;
def : Pat<(v16i8  (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr  VR512:$src)>;
def : Pat<(v8i32  (X86vtrunc (v8i64  VR512:$src))), (VPMOVQDrr  VR512:$src)>;

def : Pat<(v16i8  (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
                  (VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
                  (VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
def : Pat<(v8i16  (X86vtruncm VK8WM:$mask,  (v8i64 VR512:$src))),
                  (VPMOVQWrrkz  VK8WM:$mask, VR512:$src)>;
def : Pat<(v8i32  (X86vtruncm VK8WM:$mask,  (v8i64 VR512:$src))),
                  (VPMOVQDrrkz  VK8WM:$mask, VR512:$src)>;
3925
3926
// Integer widening moves (vpmovzx*/vpmovsx*).  Only the unmasked rr/rm
// forms carry patterns (OpNode is X86vzext or X86vsext); the masked
// rrk/rrkz/rmk/rmkz forms are assembly-only here.
multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                      RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
                      PatFrag mem_frag, X86MemOperand x86memop,
                      ValueType OpVT, ValueType InVT> {

  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
              (ins SrcRC:$src),
              !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;

  def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
              (ins KRC:$mask, SrcRC:$src),
              !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
              []>, EVEX, EVEX_K;

  def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
              (ins KRC:$mask, SrcRC:$src),
              !strconcat(OpcodeStr, " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
              []>, EVEX, EVEX_KZ;

  let mayLoad = 1 in {
    // Memory source is loaded as an i64 vector and bitcast to InVT.
    def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins x86memop:$src),
              !strconcat(OpcodeStr," \t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
              EVEX;

    def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr," \t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
              []>,
              EVEX, EVEX_K;

    def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr," \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
              []>,
              EVEX, EVEX_KZ;
  }
}
3968
// Zero-extending (vpmovzx*) and sign-extending (vpmovsx*) widening
// instantiations, byte/word/dword sources to dword/qword 512-bit
// destinations.
defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
                             memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
                             EVEX_CD8<8, CD8VQ>;
defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
                             memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
                             EVEX_CD8<8, CD8VO>;
defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
                             memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
                             EVEX_CD8<16, CD8VH>;
defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
                             memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
                             EVEX_CD8<16, CD8VQ>;
defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
                             memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
                             EVEX_CD8<32, CD8VH>;

defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
                             memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
                             EVEX_CD8<8, CD8VQ>;
defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
                             memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
                             EVEX_CD8<8, CD8VO>;
defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
                             memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
                             EVEX_CD8<16, CD8VH>;
defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
                             memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
                             EVEX_CD8<16, CD8VQ>;
defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
                             memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
                             EVEX_CD8<32, CD8VH>;
4000
4001//===----------------------------------------------------------------------===//
4002// GATHER - SCATTER Operations
4003
// Masked gather.  $dst is early-clobber and tied to $src1 (unmasked
// destination elements are merged from it); the mask register is both
// read and written ($mask_wb), since hardware clears mask bits as
// elements complete.
multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                       RegisterClass RC, X86MemOperand memop> {
let mayLoad = 1,
  Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
  def rm  : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
            (ins RC:$src1, KRC:$mask, memop:$src2),
            !strconcat(OpcodeStr,
            " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
}
4014
// Gather instantiations.  The memory operand encodes the index width
// (vy* = 256-bit index vector, vz* = 512-bit) and the qps/qd forms
// write only a 256-bit result (8 x 32-bit elements from qword indices).
let ExeDomain = SSEPackedDouble in {
defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}

let ExeDomain = SSEPackedSingle in {
defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;
}
  
defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512,  vy64xmem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512,  vz64mem>,
                                 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X,  vz64mem>,
                                 EVEX_V512, EVEX_CD8<32, CD8VT1>;
4038
// Skeleton for one masked scatter instruction: stores elements of RC:$src2
// to the locations addressed by the vector-indexed memory operand `memop`,
// under control of k-register mask $mask.
multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                       RegisterClass RC, X86MemOperand memop> {
// The mask is read and written back ($mask_wb): hardware clears each mask
// bit once the corresponding element has been stored, hence the tied
// operand constraint.
let mayStore = 1, Constraints = "$mask = $mask_wb" in
  def mr  : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
            (ins memop:$dst, KRC:$mask, RC:$src2),
            !strconcat(OpcodeStr,
            " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
}
4048
// FP scatters; mnemonic naming mirrors the gathers above
// ("d"/"q" index width crossed with "ps"/"pd" data type).
let ExeDomain = SSEPackedDouble in {
defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
}

let ExeDomain = SSEPackedSingle in {
defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;
// 8 floats scattered with 8 qword indices; the source is a ymm (VR256X).
defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;
}

// Integer scatters.
defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
                                  EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// 8 dwords scattered with 8 qword indices; the source is a ymm (VR256X).
defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
                                  EVEX_V512, EVEX_CD8<32, CD8VT1>;
4072
// Gather/scatter prefetch (AVX-512 PF extension, predicate HasPFI):
// prefetches the sparse memory locations selected by the mask; there is no
// register result.
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
                       RegisterClass KRC, X86MemOperand memop> {
  // hasSideEffects = 1 keeps this result-less instruction from being
  // dead-code eliminated.
  let Predicates = [HasPFI], hasSideEffects = 1 in
  def m  : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
            !strconcat(OpcodeStr, " \t{$src {${mask}}|{${mask}}, $src}"),
            []>, EVEX, EVEX_K;
}
4081
// Prefetch instantiations. Opcode 0xC6 takes dword indices ("d" forms),
// 0xC7 takes qword indices ("q" forms); the ModRM reg field (MRM1m/MRM2m for
// gather PF0/PF1, MRM5m/MRM6m for scatter PF0/PF1) selects the hint variant.
defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

// NOTE(review): the *DPD forms use vy32mem while the data gathers above use
// vy64xmem for their d-index pd forms — presumably intentional since only
// the indices matter for a prefetch; confirm against the SDM operand tables.
defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
                     
defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
4129//===----------------------------------------------------------------------===//
4130// VSHUFPS - VSHUFPD Operations
4131
// SHUFPS/SHUFPD-style shuffle: combine elements of $src1 and $src2 as
// selected by the imm8 $src3, producing a `vt` result in domain `d`.
multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
                      ValueType vt, string OpcodeStr, PatFrag mem_frag,
                      Domain d> {
  // Memory form: the second shuffle operand is loaded via mem_frag.
  def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, x86memop:$src2, i8imm:$src3),
                   !strconcat(OpcodeStr,
                   " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
  // Register-register form.
  def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, i8imm:$src3),
                   !strconcat(OpcodeStr,
                   " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffle]>;
}
4150
defm VSHUFPSZ  : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
                  SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VSHUFPDZ  : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
                  SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// Same-width integer X86Shufp nodes reuse the FP shuffle encodings: the bit
// pattern of the shuffle is identical regardless of element interpretation.
def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v16i32 (X86Shufp VR512:$src1,
                    (memopv16i32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v8i64 (X86Shufp VR512:$src1,
                            (memopv8i64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
4167
// VALIGND/VALIGNQ: concatenate $src2:$src1 and extract a vector starting at
// the element offset given by imm8 $src3. Patterns are empty here; selection
// is done by the explicit Pats below the instantiations.
multiclass avx512_alignr<string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop> {
  def rri : AVX512AIi8<0x03, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, i8imm:$src3),
                     !strconcat(OpcodeStr,
                     " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                     []>, EVEX_4V;
  // Memory form; mayLoad is set explicitly because there is no pattern from
  // which TableGen could infer it.
  let mayLoad = 1 in
  def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs RC:$dst),
                     (ins RC:$src1, x86memop:$src2, i8imm:$src3),
                     !strconcat(OpcodeStr,
                     " \t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                     []>, EVEX_4V;
}
defm VALIGND : avx512_alignr<"valignd", VR512, i512mem>, 
                 EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VALIGNQ : avx512_alignr<"valignq", VR512, i512mem>, 
                 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

// Select VALIGND/Q for PALIGNR-style shuffles of 512-bit vectors (both FP
// and integer element types). Note the sources are deliberately swapped
// ($src2 before $src1): VALIGN's source operand order is reversed relative
// to the X86PAlignr node's.
def : Pat<(v16f32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
def : Pat<(v8f64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
def : Pat<(v16i32 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VALIGNDrri VR512:$src2, VR512:$src1, imm:$imm)>;
def : Pat<(v8i64 (X86PAlignr VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VALIGNQrri VR512:$src2, VR512:$src1, imm:$imm)>;
4195
// Helper fragments to match sext vXi1 to vXiY: an arithmetic shift right by
// (element width - 1) replicates the sign bit across every bit of each
// element, which is the canonical form of a sign-extended i1 mask.
def v16i1sextv16i32  : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64  : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
4199
// VPABS (packed absolute value) in all AVX-512 operand forms: plain register
// (rr), masked merge (rrk/rmk/rmbk), masked zeroing (rrkz/rmkz/rmbkz),
// full-vector memory (rm*), and broadcast-from-scalar memory (rmb*, where
// BrdcstStr is the "{1toN}" asm suffix). Patterns are empty; selection is
// done by the explicit Pats following the instantiations below. The OpVT
// parameter is currently unused inside the multiclass.
multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
                        RegisterClass KRC, RegisterClass RC,
                        X86MemOperand x86memop, X86MemOperand x86scalar_mop,
                        string BrdcstStr> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
            !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
            []>, EVEX;
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
             !strconcat(OpcodeStr, " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
             []>, EVEX, EVEX_K;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
              !strconcat(OpcodeStr,
                         " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
              []>, EVEX, EVEX_KZ;
  // Memory forms. Fixed: these previously hard-coded VR512 as the output
  // register class instead of the RC template parameter, which was
  // inconsistent with the register forms above and would silently produce
  // wrong instructions for any non-512-bit instantiation of this multiclass.
  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins x86memop:$src),
              !strconcat(OpcodeStr, " \t{$src, $dst|$dst, $src}"),
              []>, EVEX;
    def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, x86memop:$src),
               !strconcat(OpcodeStr,
                          " \t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
               []>, EVEX, EVEX_K;
    def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, x86memop:$src),
                !strconcat(OpcodeStr,
                           " \t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
                []>, EVEX, EVEX_KZ;
    def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins x86scalar_mop:$src),
               !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
                          ", $dst|$dst, ${src}", BrdcstStr, "}"),
               []>, EVEX, EVEX_B;
    def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, x86scalar_mop:$src),
                !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
                           ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
                []>, EVEX, EVEX_B, EVEX_K;
    def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
                 (ins KRC:$mask, x86scalar_mop:$src),
                 !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
                            ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
                            BrdcstStr, "}"),
                 []>, EVEX, EVEX_B, EVEX_KZ;
  }
}
4247
// VPABSD/VPABSQ zmm forms: 16 x i32 and 8 x i64 respectively, with the
// matching broadcast suffixes {1to16}/{1to8}.
defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
                           i512mem, i32mem, "{1to16}">, EVEX_V512,
                           EVEX_CD8<32, CD8VF>;
defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
                           i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
                           EVEX_CD8<64, CD8VF>;
4254
// abs(x) selection: matches the classic branch-free expansion
//   (x + signmask) ^ signmask,  where signmask = x >>s (width-1)
// (via the v*i1sext* PatLeafs above) and folds it to a single VPABS.
def : Pat<(xor
          (bc_v16i32 (v16i1sextv16i32)),
          (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(xor
          (bc_v8i64 (v8i1sextv8i64)),
          (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
          (VPABSQZrr VR512:$src)>;

// Unmasked calls of the mask intrinsics (all-zero passthru, all-ones mask)
// map straight to the plain register instruction.
def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
                   (v16i32 immAllZerosV), (i16 -1))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
          (VPABSQZrr VR512:$src)>;
4270
// VPCONFLICT (CDI): per-element conflict detection over a vector. Provides
// plain, memory, and broadcast forms, each in unmasked, zero-masking (kz),
// and merge-masking (k, with $src1 tied to $dst) flavors. Patterns are
// empty; selection is via the explicit Pats after the instantiations.
multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
                        RegisterClass RC, RegisterClass KRC,
                        X86MemOperand x86memop,
                        X86MemOperand x86scalar_mop, string BrdcstStr> {
  // Fixed: the rr asm string previously contained a stray space before the
  // '|' ("${dst} |"), which emitted a trailing space in AT&T-syntax output
  // and was inconsistent with every other form in this multiclass.
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src),
       !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
       []>, EVEX;
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
       (ins x86memop:$src),
       !strconcat(OpcodeStr, " \t{$src, ${dst}|${dst}, $src}"),
       []>, EVEX;
  def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
       (ins x86scalar_mop:$src),
       !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
                  ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
       []>, EVEX, EVEX_B;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
       (ins KRC:$mask, RC:$src),
       !strconcat(OpcodeStr,
                  " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
       []>, EVEX, EVEX_KZ;
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, x86memop:$src),
       !strconcat(OpcodeStr,
                  " \t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
       []>, EVEX, EVEX_KZ;
  def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
       (ins KRC:$mask, x86scalar_mop:$src),
       !strconcat(OpcodeStr, " \t{${src}", BrdcstStr,
                  ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
                  BrdcstStr, "}"),
       []>, EVEX, EVEX_KZ, EVEX_B;

  // Merge-masking forms: the passthru value arrives in $src1, which must be
  // the same register as the destination.
  let Constraints = "$src1 = $dst" in {
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, KRC:$mask, RC:$src2),
       !strconcat(OpcodeStr,
                  " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
       []>, EVEX, EVEX_K;
  def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, KRC:$mask, x86memop:$src2),
       !strconcat(OpcodeStr,
                  " \t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
       []>, EVEX, EVEX_K;
  def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
       !strconcat(OpcodeStr, " \t{${src2}", BrdcstStr,
                  ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
       []>, EVEX, EVEX_K, EVEX_B;
   }
}
4323
// Conflict-detection instantiations, gated on the CDI feature predicate.
let Predicates = [HasCDI] in {
defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
                    i512mem, i32mem, "{1to16}">,
                    EVEX_V512, EVEX_CD8<32, CD8VF>;


defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
                    i512mem, i64mem, "{1to8}">,
                    EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;

}
4335
// Masked conflict intrinsics: the intrinsic's (src, passthru, mask) operand
// order maps onto the merge-masked instruction's (passthru, mask, src)
// order, with the GR mask first copied into the matching k-register class.
def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
                                              GR16:$mask),
          (VPCONFLICTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
                                              GR8:$mask),
          (VPCONFLICTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;
4345
// Stores of constant i1: both "true" encodings (-1 and 1) are stored as the
// byte 1, "false" as the byte 0.
def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1  1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1  0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;

// Store of an i1 held in a mask register: widen VK1 to VK16 and use KMOVW.
// NOTE(review): KMOVW stores 2 bytes, which is wider than the i1's nominal
// size — presumably the i1's memory slot is at least 2 bytes here; confirm.
def : Pat<(store VK1:$src, addr:$dst),
          (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;
4352
// Matches a truncating store whose memory type is exactly i1.
def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;

// Truncating an i8 to i1 in memory: just store the whole byte.
def : Pat<(truncstorei1 GR8:$src, addr:$dst),
          (MOV8mr addr:$dst, GR8:$src)>;
4360
4361