1//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that AArch64 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "aarch64-isel"
16#include "AArch64.h"
17#include "AArch64ISelLowering.h"
18#include "AArch64MachineFunctionInfo.h"
19#include "AArch64TargetMachine.h"
20#include "AArch64TargetObjectFile.h"
21#include "Utils/AArch64BaseInfo.h"
22#include "llvm/CodeGen/Analysis.h"
23#include "llvm/CodeGen/CallingConvLower.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstrBuilder.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28#include "llvm/IR/CallingConv.h"
29#include "llvm/Support/MathExtras.h"
30
31using namespace llvm;
32
33static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
34  assert (TM.getSubtarget<AArch64Subtarget>().isTargetELF() &&
35          "unknown subtarget type");
36  return new AArch64ElfTargetObjectFile();
37}
38
39AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
40  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
41
42  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
43
44  // SIMD compares set the entire lane's bits to 1
45  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
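  // For example, a NEON compare such as "cmeq v0.4s, v1.4s, v2.4s" writes
  // 0xffffffff into each lane where the comparison holds and 0 elsewhere,
  // which is exactly ZeroOrNegativeOneBooleanContent.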
46
47  // Scalar register <-> type mapping
48  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
49  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
50
51  if (Subtarget->hasFPARMv8()) {
52    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
53    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
54    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
55    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
56  }
57
58  if (Subtarget->hasNEON()) {
59    // And the vectors
60    addRegisterClass(MVT::v1i8,  &AArch64::FPR8RegClass);
61    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
62    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
63    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
64    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
65    addRegisterClass(MVT::v8i8,  &AArch64::FPR64RegClass);
66    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
67    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
69    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
70    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
71    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
72    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
73    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
74    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
75    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
76  }
77
78  computeRegisterProperties();
79
80  // We combine OR nodes for bitfield and NEON BSL operations.
81  setTargetDAGCombine(ISD::OR);
82
83  setTargetDAGCombine(ISD::AND);
84  setTargetDAGCombine(ISD::SRA);
85  setTargetDAGCombine(ISD::SRL);
86  setTargetDAGCombine(ISD::SHL);
87
88  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
89  setTargetDAGCombine(ISD::INTRINSIC_VOID);
90  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
91
92  // AArch64 does not have i1 loads, or much of anything for i1 really.
93  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
94  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
95  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
96
97  setStackPointerRegisterToSaveRestore(AArch64::XSP);
98  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
99  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
100  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
101
102  // We'll lower globals to wrappers for selection.
103  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
104  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
105
106  // A64 instructions have the comparison predicate attached to the user of the
107  // result, but having a separate comparison is valuable for matching.
108  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
109  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
110  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
111  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
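  // For example, an i32 BR_CC with a setlt condition is matched as a separate
  // compare that sets NZCV followed by a conditional branch, roughly:
  //     cmp  w0, w1
  //     b.lt .Ltarget
  // (register and label names are illustrative).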
112
113  setOperationAction(ISD::SELECT, MVT::i32, Custom);
114  setOperationAction(ISD::SELECT, MVT::i64, Custom);
115  setOperationAction(ISD::SELECT, MVT::f32, Custom);
116  setOperationAction(ISD::SELECT, MVT::f64, Custom);
117
118  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
119  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
120  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
121  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
122
123  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
124
125  setOperationAction(ISD::SETCC, MVT::i32, Custom);
126  setOperationAction(ISD::SETCC, MVT::i64, Custom);
127  setOperationAction(ISD::SETCC, MVT::f32, Custom);
128  setOperationAction(ISD::SETCC, MVT::f64, Custom);
129
130  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
131  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
132  setOperationAction(ISD::JumpTable, MVT::i64, Custom);
133
134  setOperationAction(ISD::VASTART, MVT::Other, Custom);
135  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
136  setOperationAction(ISD::VAEND, MVT::Other, Expand);
137  setOperationAction(ISD::VAARG, MVT::Other, Expand);
138
139  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
140  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
141
142  setOperationAction(ISD::ROTL, MVT::i32, Expand);
143  setOperationAction(ISD::ROTL, MVT::i64, Expand);
144
145  setOperationAction(ISD::UREM, MVT::i32, Expand);
146  setOperationAction(ISD::UREM, MVT::i64, Expand);
147  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
148  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
149
150  setOperationAction(ISD::SREM, MVT::i32, Expand);
151  setOperationAction(ISD::SREM, MVT::i64, Expand);
152  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
153  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
154
155  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
156  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
157  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
158  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
159
160  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
161  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
162
163  // Legal floating-point operations.
164  setOperationAction(ISD::FABS, MVT::f32, Legal);
165  setOperationAction(ISD::FABS, MVT::f64, Legal);
166
167  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
168  setOperationAction(ISD::FCEIL, MVT::f64, Legal);
169
170  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
171  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
172
173  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
174  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
175
176  setOperationAction(ISD::FNEG, MVT::f32, Legal);
177  setOperationAction(ISD::FNEG, MVT::f64, Legal);
178
179  setOperationAction(ISD::FRINT, MVT::f32, Legal);
180  setOperationAction(ISD::FRINT, MVT::f64, Legal);
181
182  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
183  setOperationAction(ISD::FSQRT, MVT::f64, Legal);
184
185  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
186  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
187
188  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
189  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
190  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);
191
192  // Illegal floating-point operations.
193  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
194  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
195
196  setOperationAction(ISD::FCOS, MVT::f32, Expand);
197  setOperationAction(ISD::FCOS, MVT::f64, Expand);
198
199  setOperationAction(ISD::FEXP, MVT::f32, Expand);
200  setOperationAction(ISD::FEXP, MVT::f64, Expand);
201
202  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
203  setOperationAction(ISD::FEXP2, MVT::f64, Expand);
204
205  setOperationAction(ISD::FLOG, MVT::f32, Expand);
206  setOperationAction(ISD::FLOG, MVT::f64, Expand);
207
208  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
209  setOperationAction(ISD::FLOG2, MVT::f64, Expand);
210
211  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
212  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
213
214  setOperationAction(ISD::FPOW, MVT::f32, Expand);
215  setOperationAction(ISD::FPOW, MVT::f64, Expand);
216
217  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
218  setOperationAction(ISD::FPOWI, MVT::f64, Expand);
219
220  setOperationAction(ISD::FREM, MVT::f32, Expand);
221  setOperationAction(ISD::FREM, MVT::f64, Expand);
222
223  setOperationAction(ISD::FSIN, MVT::f32, Expand);
224  setOperationAction(ISD::FSIN, MVT::f64, Expand);
225
226  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
227  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
228
229  // Virtually no operation on f128 is legal, but LLVM can't expand them when
230  // there's a valid register class, so we need custom operations in most cases.
231  setOperationAction(ISD::FABS,       MVT::f128, Expand);
232  setOperationAction(ISD::FADD,       MVT::f128, Custom);
233  setOperationAction(ISD::FCOPYSIGN,  MVT::f128, Expand);
234  setOperationAction(ISD::FCOS,       MVT::f128, Expand);
235  setOperationAction(ISD::FDIV,       MVT::f128, Custom);
236  setOperationAction(ISD::FMA,        MVT::f128, Expand);
237  setOperationAction(ISD::FMUL,       MVT::f128, Custom);
238  setOperationAction(ISD::FNEG,       MVT::f128, Expand);
239  setOperationAction(ISD::FP_EXTEND,  MVT::f128, Expand);
240  setOperationAction(ISD::FP_ROUND,   MVT::f128, Expand);
241  setOperationAction(ISD::FPOW,       MVT::f128, Expand);
242  setOperationAction(ISD::FREM,       MVT::f128, Expand);
243  setOperationAction(ISD::FRINT,      MVT::f128, Expand);
244  setOperationAction(ISD::FSIN,       MVT::f128, Expand);
245  setOperationAction(ISD::FSINCOS,    MVT::f128, Expand);
246  setOperationAction(ISD::FSQRT,      MVT::f128, Expand);
247  setOperationAction(ISD::FSUB,       MVT::f128, Custom);
248  setOperationAction(ISD::FTRUNC,     MVT::f128, Expand);
249  setOperationAction(ISD::SETCC,      MVT::f128, Custom);
250  setOperationAction(ISD::BR_CC,      MVT::f128, Custom);
251  setOperationAction(ISD::SELECT,     MVT::f128, Expand);
252  setOperationAction(ISD::SELECT_CC,  MVT::f128, Custom);
253  setOperationAction(ISD::FP_EXTEND,  MVT::f128, Custom);
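  // The Custom f128 lowerings above mostly produce libcalls; for example an
  // f128 FADD typically becomes a call to __addtf3 rather than any native
  // A64 instruction.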
254
255  // Lowering for many of the conversions is actually specified by the non-f128
256  // type. The LowerXXX function will be trivial when f128 isn't involved.
257  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
258  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
259  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
260  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
261  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
262  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
263  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
264  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
265  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
266  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
267  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
268  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
269  setOperationAction(ISD::FP_ROUND,  MVT::f32, Custom);
270  setOperationAction(ISD::FP_ROUND,  MVT::f64, Custom);
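  // For example, FP_TO_SINT from f128 to i32 is lowered to a __fixtfsi
  // libcall, while the same node with an f32 or f64 source is handled by the
  // normal FCVTZS patterns.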
271
272  // i128 shift operation support
273  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
274  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
275  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
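  // Conceptually, for a left shift by s with 0 < s < 64 the two i64 halves
  // become hi = (hi << s) | (lo >> (64 - s)) and lo = lo << s; the custom
  // lowering also has to cover shifts of 64 or more, where lo is shifted
  // entirely into hi.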
276
277  // This prevents LLVM trying to compress double constants into a floating
278  // constant-pool entry and trying to load from there. It's of doubtful benefit
279  // for A64: we'd need LDR followed by FCVT, I believe.
280  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
281  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
282  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
283
284  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
285  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
286  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
287  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
288  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
289  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
290
291  setExceptionPointerRegister(AArch64::X0);
292  setExceptionSelectorRegister(AArch64::X1);
293
294  if (Subtarget->hasNEON()) {
295    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Expand);
296    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);
297    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
298    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v1i64, Expand);
299    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v16i8, Expand);
300    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Expand);
301    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);
302    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Expand);
303
304    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
305    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
306    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
307    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
308    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
309    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
310    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
311    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
312    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
313    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
314    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
315    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
316    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
317    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
318    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
319
320    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
321    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
322    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
323    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
324    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
325    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
326    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
327    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
328    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
329    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
330    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
331    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
332
333    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i32, Legal);
334    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
335    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
336    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
337    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
338    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
339    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);
340
341    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i8, Custom);
342    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i16, Custom);
343    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
344    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
345    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
346
347    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
348    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
349    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
350    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
351    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
352    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
353    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
354    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
355    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
356    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
357    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
358    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
359
360    setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
361    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
362    setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
363    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
364
365    setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
366    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
367    setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
368    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
369
370    setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
371    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
372    setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
373    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
374
375    setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
376    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
377    setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
378    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
379
380    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
381    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
382    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
383    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
384
385    setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
386    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
387    setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
388    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
389
390    setOperationAction(ISD::SINT_TO_FP, MVT::v1i8, Custom);
391    setOperationAction(ISD::SINT_TO_FP, MVT::v1i16, Custom);
392    setOperationAction(ISD::SINT_TO_FP, MVT::v1i32, Custom);
393    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
394    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
395    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);
396
397    setOperationAction(ISD::UINT_TO_FP, MVT::v1i8, Custom);
398    setOperationAction(ISD::UINT_TO_FP, MVT::v1i16, Custom);
399    setOperationAction(ISD::UINT_TO_FP, MVT::v1i32, Custom);
400    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
401    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
402    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);
403
404    setOperationAction(ISD::FP_TO_SINT, MVT::v1i8, Custom);
405    setOperationAction(ISD::FP_TO_SINT, MVT::v1i16, Custom);
406    setOperationAction(ISD::FP_TO_SINT, MVT::v1i32, Custom);
407    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
408    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
409    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Custom);
410
411    setOperationAction(ISD::FP_TO_UINT, MVT::v1i8, Custom);
412    setOperationAction(ISD::FP_TO_UINT, MVT::v1i16, Custom);
413    setOperationAction(ISD::FP_TO_UINT, MVT::v1i32, Custom);
414    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
415    setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
416    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Custom);
417
418    // NEON does not support vector divide/remainder operations except
419    // floating-point divide.
420    setOperationAction(ISD::SDIV, MVT::v1i8, Expand);
421    setOperationAction(ISD::SDIV, MVT::v8i8, Expand);
422    setOperationAction(ISD::SDIV, MVT::v16i8, Expand);
423    setOperationAction(ISD::SDIV, MVT::v1i16, Expand);
424    setOperationAction(ISD::SDIV, MVT::v4i16, Expand);
425    setOperationAction(ISD::SDIV, MVT::v8i16, Expand);
426    setOperationAction(ISD::SDIV, MVT::v1i32, Expand);
427    setOperationAction(ISD::SDIV, MVT::v2i32, Expand);
428    setOperationAction(ISD::SDIV, MVT::v4i32, Expand);
429    setOperationAction(ISD::SDIV, MVT::v1i64, Expand);
430    setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
431
432    setOperationAction(ISD::UDIV, MVT::v1i8, Expand);
433    setOperationAction(ISD::UDIV, MVT::v8i8, Expand);
434    setOperationAction(ISD::UDIV, MVT::v16i8, Expand);
435    setOperationAction(ISD::UDIV, MVT::v1i16, Expand);
436    setOperationAction(ISD::UDIV, MVT::v4i16, Expand);
437    setOperationAction(ISD::UDIV, MVT::v8i16, Expand);
438    setOperationAction(ISD::UDIV, MVT::v1i32, Expand);
439    setOperationAction(ISD::UDIV, MVT::v2i32, Expand);
440    setOperationAction(ISD::UDIV, MVT::v4i32, Expand);
441    setOperationAction(ISD::UDIV, MVT::v1i64, Expand);
442    setOperationAction(ISD::UDIV, MVT::v2i64, Expand);
443
444    setOperationAction(ISD::SREM, MVT::v1i8, Expand);
445    setOperationAction(ISD::SREM, MVT::v8i8, Expand);
446    setOperationAction(ISD::SREM, MVT::v16i8, Expand);
447    setOperationAction(ISD::SREM, MVT::v1i16, Expand);
448    setOperationAction(ISD::SREM, MVT::v4i16, Expand);
449    setOperationAction(ISD::SREM, MVT::v8i16, Expand);
450    setOperationAction(ISD::SREM, MVT::v1i32, Expand);
451    setOperationAction(ISD::SREM, MVT::v2i32, Expand);
452    setOperationAction(ISD::SREM, MVT::v4i32, Expand);
453    setOperationAction(ISD::SREM, MVT::v1i64, Expand);
454    setOperationAction(ISD::SREM, MVT::v2i64, Expand);
455
456    setOperationAction(ISD::UREM, MVT::v1i8, Expand);
457    setOperationAction(ISD::UREM, MVT::v8i8, Expand);
458    setOperationAction(ISD::UREM, MVT::v16i8, Expand);
459    setOperationAction(ISD::UREM, MVT::v1i16, Expand);
460    setOperationAction(ISD::UREM, MVT::v4i16, Expand);
461    setOperationAction(ISD::UREM, MVT::v8i16, Expand);
462    setOperationAction(ISD::UREM, MVT::v1i32, Expand);
463    setOperationAction(ISD::UREM, MVT::v2i32, Expand);
464    setOperationAction(ISD::UREM, MVT::v4i32, Expand);
465    setOperationAction(ISD::UREM, MVT::v1i64, Expand);
466    setOperationAction(ISD::UREM, MVT::v2i64, Expand);
467
468    setOperationAction(ISD::FREM, MVT::v2f32, Expand);
469    setOperationAction(ISD::FREM, MVT::v4f32, Expand);
470    setOperationAction(ISD::FREM, MVT::v1f64, Expand);
471    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
472
473    setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
474    setOperationAction(ISD::SELECT, MVT::v16i8, Expand);
475    setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
476    setOperationAction(ISD::SELECT, MVT::v8i16, Expand);
477    setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
478    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
479    setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
480    setOperationAction(ISD::SELECT, MVT::v2i64, Expand);
481    setOperationAction(ISD::SELECT, MVT::v2f32, Expand);
482    setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
483    setOperationAction(ISD::SELECT, MVT::v1f64, Expand);
484    setOperationAction(ISD::SELECT, MVT::v2f64, Expand);
485
486    setOperationAction(ISD::SELECT_CC, MVT::v8i8, Custom);
487    setOperationAction(ISD::SELECT_CC, MVT::v16i8, Custom);
488    setOperationAction(ISD::SELECT_CC, MVT::v4i16, Custom);
489    setOperationAction(ISD::SELECT_CC, MVT::v8i16, Custom);
490    setOperationAction(ISD::SELECT_CC, MVT::v2i32, Custom);
491    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Custom);
492    setOperationAction(ISD::SELECT_CC, MVT::v1i64, Custom);
493    setOperationAction(ISD::SELECT_CC, MVT::v2i64, Custom);
494    setOperationAction(ISD::SELECT_CC, MVT::v2f32, Custom);
495    setOperationAction(ISD::SELECT_CC, MVT::v4f32, Custom);
496    setOperationAction(ISD::SELECT_CC, MVT::v1f64, Custom);
497    setOperationAction(ISD::SELECT_CC, MVT::v2f64, Custom);
498
499    // Vector ExtLoad and TruncStore are expanded.
500    for (unsigned I = MVT::FIRST_VECTOR_VALUETYPE;
501         I <= MVT::LAST_VECTOR_VALUETYPE; ++I) {
502      MVT VT = (MVT::SimpleValueType) I;
503      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
504      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
505      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
506      for (unsigned II = MVT::FIRST_VECTOR_VALUETYPE;
507           II <= MVT::LAST_VECTOR_VALUETYPE; ++II) {
508        MVT VT1 = (MVT::SimpleValueType) II;
509        // A TruncStore has two vector types of the same number of elements
510        // and different element sizes.
511        if (VT.getVectorNumElements() == VT1.getVectorNumElements() &&
512            VT.getVectorElementType().getSizeInBits()
513                > VT1.getVectorElementType().getSizeInBits())
514          setTruncStoreAction(VT, VT1, Expand);
515      }
516    }
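    // For example, a truncating store of v4i32 to v4i16 meets the condition
    // above (same element count, 32 > 16 bits per element) and is expanded,
    // whereas v4i32 to v2i64 is not because the element counts differ.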
517
518    // There is no v1i64/v2i64 multiply; expand v1i64/v2i64 to GPR i64 multiply.
519    // FIXME: For a v2i64 multiply, we currently copy the operands from VPR to
520    // GPR, do two i64 multiplies, and copy the result back to VPR. This might
521    // be doable with the following three NEON instructions:
522    //        pmull  v2.1q, v0.1d, v1.1d
523    //        pmull2 v3.1q, v0.2d, v1.2d
524    //        ins    v2.d[1], v3.d[0]
525    // Since we cannot currently verify that this transformation is correct, it
526    // is left as a possible future optimization.
527    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
528    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
529
530    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
531    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
532    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
533    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
534    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
535    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
536    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
537    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
538    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
539  }
540
541  setTargetDAGCombine(ISD::SETCC);
542  setTargetDAGCombine(ISD::SIGN_EXTEND);
543  setTargetDAGCombine(ISD::VSELECT);
544}
545
546EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
547  // It's reasonably important that this value matches the "natural" legal
548  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
549  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
550  if (!VT.isVector()) return MVT::i32;
551  return VT.changeVectorElementTypeToInteger();
552}
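// For example, a scalar f64 setcc produces an i32 result, while a v2f64 setcc
// produces v2i64 (the same vector shape with integer elements), matching the
// all-ones/all-zeros lanes that NEON comparisons generate.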
553
554static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
555                                  unsigned &LdrOpc,
556                                  unsigned &StrOpc) {
557  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
558                                       AArch64::LDXR_word, AArch64::LDXR_dword};
559  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
560                                     AArch64::LDAXR_word, AArch64::LDAXR_dword};
561  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
562                                       AArch64::STXR_word, AArch64::STXR_dword};
563  static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword,
564                                     AArch64::STLXR_word, AArch64::STLXR_dword};
565
566  const unsigned *LoadOps, *StoreOps;
567  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
568    LoadOps = LoadAcqs;
569  else
570    LoadOps = LoadBares;
571
572  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
573    StoreOps = StoreRels;
574  else
575    StoreOps = StoreBares;
576
577  assert(isPowerOf2_32(Size) && Size <= 8 &&
578         "unsupported size for atomic binary op!");
579
580  LdrOpc = LoadOps[Log2_32(Size)];
581  StrOpc = StoreOps[Log2_32(Size)];
582}
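// For example, a 4-byte atomic operation with Acquire ordering gets
// LDAXR_word (load-acquire exclusive) paired with the plain STXR_word, since
// only orderings with release semantics upgrade the store to an STLXR variant.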
583
584// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
585// have a value type mapped; they are both defined as MVT::untyped. Without
586// knowing the MVT type, MachineLICM::getRegisterClassIDAndCost would fail to
587// figure out the register pressure correctly.
588std::pair<const TargetRegisterClass*, uint8_t>
589AArch64TargetLowering::findRepresentativeClass(MVT VT) const {
590  const TargetRegisterClass *RRC = 0;
591  uint8_t Cost = 1;
592  switch (VT.SimpleTy) {
593  default:
594    return TargetLowering::findRepresentativeClass(VT);
595  case MVT::v4i64:
596    RRC = &AArch64::QPairRegClass;
597    Cost = 2;
598    break;
599  case MVT::v8i64:
600    RRC = &AArch64::QQuadRegClass;
601    Cost = 4;
602    break;
603  }
604  return std::make_pair(RRC, Cost);
605}
606
607MachineBasicBlock *
608AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
609                                        unsigned Size,
610                                        unsigned BinOpcode) const {
611  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
612  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
613
614  const BasicBlock *LLVM_BB = BB->getBasicBlock();
615  MachineFunction *MF = BB->getParent();
616  MachineFunction::iterator It = BB;
617  ++It;
618
619  unsigned dest = MI->getOperand(0).getReg();
620  unsigned ptr = MI->getOperand(1).getReg();
621  unsigned incr = MI->getOperand(2).getReg();
622  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
623  DebugLoc dl = MI->getDebugLoc();
624
625  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
626
627  unsigned ldrOpc, strOpc;
628  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
629
630  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
631  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
632  MF->insert(It, loopMBB);
633  MF->insert(It, exitMBB);
634
635  // Transfer the remainder of BB and its successor edges to exitMBB.
636  exitMBB->splice(exitMBB->begin(), BB,
637                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
638  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
639
640  const TargetRegisterClass *TRC
641    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
642  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
643
644  //  thisMBB:
645  //   ...
646  //   fallthrough --> loopMBB
647  BB->addSuccessor(loopMBB);
648
649  //  loopMBB:
650  //   ldxr dest, ptr
651  //   <binop> scratch, dest, incr
652  //   stxr stxr_status, scratch, ptr
653  //   cbnz stxr_status, loopMBB
654  //   fallthrough --> exitMBB
655  BB = loopMBB;
656  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
657  if (BinOpcode) {
658    // All arithmetic operations we'll be creating are designed to take an extra
659    // shift or extend operand, which we can conveniently set to zero.
660
661    // Operand order needs to go the other way for NAND.
662    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
663      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
664        .addReg(incr).addReg(dest).addImm(0);
665    else
666      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
667        .addReg(dest).addReg(incr).addImm(0);
668  }
669
670  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
671  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
672  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
673
674  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
675  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
676    .addReg(stxr_status).addMBB(loopMBB);
677
678  BB->addSuccessor(loopMBB);
679  BB->addSuccessor(exitMBB);
680
681  //  exitMBB:
682  //   ...
683  BB = exitMBB;
684
685  MI->eraseFromParent();   // The instruction is gone now.
686
687  return BB;
688}
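// As an illustration, ATOMIC_LOAD_ADD_I32 with sequentially-consistent
// ordering expands to a loop along these lines (register names illustrative):
//     ldaxr   w8, [x0]
//     add     w9, w8, w1, lsl #0
//     stlxr   w10, w9, [x0]
//     cbnz    w10, <loop>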
689
690MachineBasicBlock *
691AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
692                                              MachineBasicBlock *BB,
693                                              unsigned Size,
694                                              unsigned CmpOp,
695                                              A64CC::CondCodes Cond) const {
696  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
697
698  const BasicBlock *LLVM_BB = BB->getBasicBlock();
699  MachineFunction *MF = BB->getParent();
700  MachineFunction::iterator It = BB;
701  ++It;
702
703  unsigned dest = MI->getOperand(0).getReg();
704  unsigned ptr = MI->getOperand(1).getReg();
705  unsigned incr = MI->getOperand(2).getReg();
706  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
707
708  unsigned oldval = dest;
709  DebugLoc dl = MI->getDebugLoc();
710
711  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
712  const TargetRegisterClass *TRC, *TRCsp;
713  if (Size == 8) {
714    TRC = &AArch64::GPR64RegClass;
715    TRCsp = &AArch64::GPR64xspRegClass;
716  } else {
717    TRC = &AArch64::GPR32RegClass;
718    TRCsp = &AArch64::GPR32wspRegClass;
719  }
720
721  unsigned ldrOpc, strOpc;
722  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
723
724  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
725  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
726  MF->insert(It, loopMBB);
727  MF->insert(It, exitMBB);
728
729  // Transfer the remainder of BB and its successor edges to exitMBB.
730  exitMBB->splice(exitMBB->begin(), BB,
731                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
732  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
733
734  unsigned scratch = MRI.createVirtualRegister(TRC);
735  MRI.constrainRegClass(scratch, TRCsp);
736
737  //  thisMBB:
738  //   ...
739  //   fallthrough --> loopMBB
740  BB->addSuccessor(loopMBB);
741
742  //  loopMBB:
743  //   ldxr dest, ptr
744  //   cmp incr, dest (, sign extend if necessary)
745  //   csel scratch, dest, incr, cond
746  //   stxr stxr_status, scratch, ptr
747  //   cbnz stxr_status, loopMBB
748  //   fallthrough --> exitMBB
749  BB = loopMBB;
750  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
751
752  // Build compare and cmov instructions.
753  MRI.constrainRegClass(incr, TRCsp);
754  BuildMI(BB, dl, TII->get(CmpOp))
755    .addReg(incr).addReg(oldval).addImm(0);
756
757  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
758          scratch)
759    .addReg(oldval).addReg(incr).addImm(Cond);
760
761  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
762  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
763
764  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
765    .addReg(scratch).addReg(ptr);
766  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
767    .addReg(stxr_status).addMBB(loopMBB);
768
769  BB->addSuccessor(loopMBB);
770  BB->addSuccessor(exitMBB);
771
772  //  exitMBB:
773  //   ...
774  BB = exitMBB;
775
776  MI->eraseFromParent();   // The instruction is gone now.
777
778  return BB;
779}
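// For example, ATOMIC_LOAD_MIN_I32 with monotonic ordering becomes roughly
// (the CSEL keeps the old value whenever it is already the minimum):
//     ldxr    w8, [x0]
//     cmp     w1, w8
//     csel    w9, w8, w1, gt
//     stxr    w10, w9, [x0]
//     cbnz    w10, <loop>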
780
781MachineBasicBlock *
782AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
783                                         MachineBasicBlock *BB,
784                                         unsigned Size) const {
785  unsigned dest    = MI->getOperand(0).getReg();
786  unsigned ptr     = MI->getOperand(1).getReg();
787  unsigned oldval  = MI->getOperand(2).getReg();
788  unsigned newval  = MI->getOperand(3).getReg();
789  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
790  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
791  DebugLoc dl = MI->getDebugLoc();
792
793  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
794  const TargetRegisterClass *TRCsp;
795  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;
796
797  unsigned ldrOpc, strOpc;
798  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
799
800  MachineFunction *MF = BB->getParent();
801  const BasicBlock *LLVM_BB = BB->getBasicBlock();
802  MachineFunction::iterator It = BB;
803  ++It; // insert the new blocks after the current block
804
805  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
806  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
807  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
808  MF->insert(It, loop1MBB);
809  MF->insert(It, loop2MBB);
810  MF->insert(It, exitMBB);
811
812  // Transfer the remainder of BB and its successor edges to exitMBB.
813  exitMBB->splice(exitMBB->begin(), BB,
814                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
815  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
816
817  //  thisMBB:
818  //   ...
819  //   fallthrough --> loop1MBB
820  BB->addSuccessor(loop1MBB);
821
822  // loop1MBB:
823  //   ldxr dest, [ptr]
824  //   cmp dest, oldval
825  //   b.ne exitMBB
826  BB = loop1MBB;
827  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
828
829  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
830  MRI.constrainRegClass(dest, TRCsp);
831  BuildMI(BB, dl, TII->get(CmpOp))
832    .addReg(dest).addReg(oldval).addImm(0);
833  BuildMI(BB, dl, TII->get(AArch64::Bcc))
834    .addImm(A64CC::NE).addMBB(exitMBB);
835  BB->addSuccessor(loop2MBB);
836  BB->addSuccessor(exitMBB);
837
838  // loop2MBB:
839  //   strex stxr_status, newval, [ptr]
840  //   cbnz stxr_status, loop1MBB
841  BB = loop2MBB;
842  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
843  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
844
845  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
846  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
847    .addReg(stxr_status).addMBB(loop1MBB);
848  BB->addSuccessor(loop1MBB);
849  BB->addSuccessor(exitMBB);
850
851  //  exitMBB:
852  //   ...
853  BB = exitMBB;
854
855  MI->eraseFromParent();   // The instruction is gone now.
856
857  return BB;
858}
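// For example, ATOMIC_CMP_SWAP_I32 with acquire ordering expands to roughly:
//   loop1: ldaxr w8, [x0]
//          cmp   w8, w1
//          b.ne  exit
//   loop2: stxr  w9, w2, [x0]
//          cbnz  w9, loop1
//   exit: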
859
860MachineBasicBlock *
861AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
862                                    MachineBasicBlock *MBB) const {
863  // We materialise the F128CSEL pseudo-instruction using conditional branches
864  // and loads, giving an instruction sequence like:
865  //     str q0, [sp]
866  //     b.ne IfTrue
867  //     b Finish
868  // IfTrue:
869  //     str q1, [sp]
870  // Finish:
871  //     ldr q0, [sp]
872  //
873  // Using virtual registers would probably not be beneficial since COPY
874  // instructions are expensive for f128 (there's no actual instruction to
875  // implement them).
876  //
877  // An alternative would be to do an integer-CSEL on some address. E.g.:
878  //     mov x0, sp
879  //     add x1, sp, #16
880  //     str q0, [x0]
881  //     str q1, [x1]
882  //     csel x0, x0, x1, ne
883  //     ldr q0, [x0]
884  //
885  // It's unclear which approach is actually optimal.
886  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
887  MachineFunction *MF = MBB->getParent();
888  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
889  DebugLoc DL = MI->getDebugLoc();
890  MachineFunction::iterator It = MBB;
891  ++It;
892
893  unsigned DestReg = MI->getOperand(0).getReg();
894  unsigned IfTrueReg = MI->getOperand(1).getReg();
895  unsigned IfFalseReg = MI->getOperand(2).getReg();
896  unsigned CondCode = MI->getOperand(3).getImm();
897  bool NZCVKilled = MI->getOperand(4).isKill();
898
899  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
900  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
901  MF->insert(It, TrueBB);
902  MF->insert(It, EndBB);
903
904  // Transfer rest of current basic-block to EndBB
905  EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
906                MBB->end());
907  EndBB->transferSuccessorsAndUpdatePHIs(MBB);
908
909  // We need somewhere to store the f128 value needed.
910  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);
911
912  //     [... start of incoming MBB ...]
913  //     str qIFFALSE, [sp]
914  //     b.cc IfTrue
915  //     b Done
916  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
917    .addReg(IfFalseReg)
918    .addFrameIndex(ScratchFI)
919    .addImm(0);
920  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
921    .addImm(CondCode)
922    .addMBB(TrueBB);
923  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
924    .addMBB(EndBB);
925  MBB->addSuccessor(TrueBB);
926  MBB->addSuccessor(EndBB);
927
928  if (!NZCVKilled) {
929    // NZCV is live-through TrueBB.
930    TrueBB->addLiveIn(AArch64::NZCV);
931    EndBB->addLiveIn(AArch64::NZCV);
932  }
933
934  // IfTrue:
935  //     str qIFTRUE, [sp]
936  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
937    .addReg(IfTrueReg)
938    .addFrameIndex(ScratchFI)
939    .addImm(0);
940
941  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
942  // blocks.
943  TrueBB->addSuccessor(EndBB);
944
945  // Done:
946  //     ldr qDEST, [sp]
947  //     [... rest of incoming MBB ...]
948  MachineInstr *StartOfEnd = EndBB->begin();
949  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
950    .addFrameIndex(ScratchFI)
951    .addImm(0);
952
953  MI->eraseFromParent();
954  return EndBB;
955}
956
957MachineBasicBlock *
958AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
959                                                 MachineBasicBlock *MBB) const {
960  switch (MI->getOpcode()) {
961  default: llvm_unreachable("Unhandled instruction with custom inserter");
962  case AArch64::F128CSEL:
963    return EmitF128CSEL(MI, MBB);
964  case AArch64::ATOMIC_LOAD_ADD_I8:
965    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
966  case AArch64::ATOMIC_LOAD_ADD_I16:
967    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
968  case AArch64::ATOMIC_LOAD_ADD_I32:
969    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
970  case AArch64::ATOMIC_LOAD_ADD_I64:
971    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);
972
973  case AArch64::ATOMIC_LOAD_SUB_I8:
974    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
975  case AArch64::ATOMIC_LOAD_SUB_I16:
976    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
977  case AArch64::ATOMIC_LOAD_SUB_I32:
978    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
979  case AArch64::ATOMIC_LOAD_SUB_I64:
980    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);
981
982  case AArch64::ATOMIC_LOAD_AND_I8:
983    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
984  case AArch64::ATOMIC_LOAD_AND_I16:
985    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
986  case AArch64::ATOMIC_LOAD_AND_I32:
987    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
988  case AArch64::ATOMIC_LOAD_AND_I64:
989    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);
990
991  case AArch64::ATOMIC_LOAD_OR_I8:
992    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
993  case AArch64::ATOMIC_LOAD_OR_I16:
994    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
995  case AArch64::ATOMIC_LOAD_OR_I32:
996    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
997  case AArch64::ATOMIC_LOAD_OR_I64:
998    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);
999
1000  case AArch64::ATOMIC_LOAD_XOR_I8:
1001    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
1002  case AArch64::ATOMIC_LOAD_XOR_I16:
1003    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
1004  case AArch64::ATOMIC_LOAD_XOR_I32:
1005    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
1006  case AArch64::ATOMIC_LOAD_XOR_I64:
1007    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);
1008
1009  case AArch64::ATOMIC_LOAD_NAND_I8:
1010    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
1011  case AArch64::ATOMIC_LOAD_NAND_I16:
1012    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
1013  case AArch64::ATOMIC_LOAD_NAND_I32:
1014    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
1015  case AArch64::ATOMIC_LOAD_NAND_I64:
1016    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);
1017
1018  case AArch64::ATOMIC_LOAD_MIN_I8:
1019    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
1020  case AArch64::ATOMIC_LOAD_MIN_I16:
1021    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
1022  case AArch64::ATOMIC_LOAD_MIN_I32:
1023    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
1024  case AArch64::ATOMIC_LOAD_MIN_I64:
1025    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);
1026
1027  case AArch64::ATOMIC_LOAD_MAX_I8:
1028    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
1029  case AArch64::ATOMIC_LOAD_MAX_I16:
1030    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
1031  case AArch64::ATOMIC_LOAD_MAX_I32:
1032    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
1033  case AArch64::ATOMIC_LOAD_MAX_I64:
1034    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);
1035
1036  case AArch64::ATOMIC_LOAD_UMIN_I8:
1037    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
1038  case AArch64::ATOMIC_LOAD_UMIN_I16:
1039    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
1040  case AArch64::ATOMIC_LOAD_UMIN_I32:
1041    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
1042  case AArch64::ATOMIC_LOAD_UMIN_I64:
1043    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);
1044
1045  case AArch64::ATOMIC_LOAD_UMAX_I8:
1046    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
1047  case AArch64::ATOMIC_LOAD_UMAX_I16:
1048    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
1049  case AArch64::ATOMIC_LOAD_UMAX_I32:
1050    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
1051  case AArch64::ATOMIC_LOAD_UMAX_I64:
1052    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);
1053
1054  case AArch64::ATOMIC_SWAP_I8:
1055    return emitAtomicBinary(MI, MBB, 1, 0);
1056  case AArch64::ATOMIC_SWAP_I16:
1057    return emitAtomicBinary(MI, MBB, 2, 0);
1058  case AArch64::ATOMIC_SWAP_I32:
1059    return emitAtomicBinary(MI, MBB, 4, 0);
1060  case AArch64::ATOMIC_SWAP_I64:
1061    return emitAtomicBinary(MI, MBB, 8, 0);
1062
1063  case AArch64::ATOMIC_CMP_SWAP_I8:
1064    return emitAtomicCmpSwap(MI, MBB, 1);
1065  case AArch64::ATOMIC_CMP_SWAP_I16:
1066    return emitAtomicCmpSwap(MI, MBB, 2);
1067  case AArch64::ATOMIC_CMP_SWAP_I32:
1068    return emitAtomicCmpSwap(MI, MBB, 4);
1069  case AArch64::ATOMIC_CMP_SWAP_I64:
1070    return emitAtomicCmpSwap(MI, MBB, 8);
1071  }
1072}
1073
1074
1075const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
1076  switch (Opcode) {
1077  case AArch64ISD::BR_CC:          return "AArch64ISD::BR_CC";
1078  case AArch64ISD::Call:           return "AArch64ISD::Call";
1079  case AArch64ISD::FPMOV:          return "AArch64ISD::FPMOV";
1080  case AArch64ISD::GOTLoad:        return "AArch64ISD::GOTLoad";
1081  case AArch64ISD::BFI:            return "AArch64ISD::BFI";
1082  case AArch64ISD::EXTR:           return "AArch64ISD::EXTR";
1083  case AArch64ISD::Ret:            return "AArch64ISD::Ret";
1084  case AArch64ISD::SBFX:           return "AArch64ISD::SBFX";
1085  case AArch64ISD::SELECT_CC:      return "AArch64ISD::SELECT_CC";
1086  case AArch64ISD::SETCC:          return "AArch64ISD::SETCC";
1087  case AArch64ISD::TC_RETURN:      return "AArch64ISD::TC_RETURN";
1088  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
1089  case AArch64ISD::TLSDESCCALL:    return "AArch64ISD::TLSDESCCALL";
1090  case AArch64ISD::WrapperLarge:   return "AArch64ISD::WrapperLarge";
1091  case AArch64ISD::WrapperSmall:   return "AArch64ISD::WrapperSmall";
1092
1093  case AArch64ISD::NEON_MOVIMM:
1094    return "AArch64ISD::NEON_MOVIMM";
1095  case AArch64ISD::NEON_MVNIMM:
1096    return "AArch64ISD::NEON_MVNIMM";
1097  case AArch64ISD::NEON_FMOVIMM:
1098    return "AArch64ISD::NEON_FMOVIMM";
1099  case AArch64ISD::NEON_CMP:
1100    return "AArch64ISD::NEON_CMP";
1101  case AArch64ISD::NEON_CMPZ:
1102    return "AArch64ISD::NEON_CMPZ";
1103  case AArch64ISD::NEON_TST:
1104    return "AArch64ISD::NEON_TST";
1105  case AArch64ISD::NEON_QSHLs:
1106    return "AArch64ISD::NEON_QSHLs";
1107  case AArch64ISD::NEON_QSHLu:
1108    return "AArch64ISD::NEON_QSHLu";
1109  case AArch64ISD::NEON_VDUP:
1110    return "AArch64ISD::NEON_VDUP";
1111  case AArch64ISD::NEON_VDUPLANE:
1112    return "AArch64ISD::NEON_VDUPLANE";
1113  case AArch64ISD::NEON_REV16:
1114    return "AArch64ISD::NEON_REV16";
1115  case AArch64ISD::NEON_REV32:
1116    return "AArch64ISD::NEON_REV32";
1117  case AArch64ISD::NEON_REV64:
1118    return "AArch64ISD::NEON_REV64";
1119  case AArch64ISD::NEON_UZP1:
1120    return "AArch64ISD::NEON_UZP1";
1121  case AArch64ISD::NEON_UZP2:
1122    return "AArch64ISD::NEON_UZP2";
1123  case AArch64ISD::NEON_ZIP1:
1124    return "AArch64ISD::NEON_ZIP1";
1125  case AArch64ISD::NEON_ZIP2:
1126    return "AArch64ISD::NEON_ZIP2";
1127  case AArch64ISD::NEON_TRN1:
1128    return "AArch64ISD::NEON_TRN1";
1129  case AArch64ISD::NEON_TRN2:
1130    return "AArch64ISD::NEON_TRN2";
1131  case AArch64ISD::NEON_LD1_UPD:
1132    return "AArch64ISD::NEON_LD1_UPD";
1133  case AArch64ISD::NEON_LD2_UPD:
1134    return "AArch64ISD::NEON_LD2_UPD";
1135  case AArch64ISD::NEON_LD3_UPD:
1136    return "AArch64ISD::NEON_LD3_UPD";
1137  case AArch64ISD::NEON_LD4_UPD:
1138    return "AArch64ISD::NEON_LD4_UPD";
1139  case AArch64ISD::NEON_ST1_UPD:
1140    return "AArch64ISD::NEON_ST1_UPD";
1141  case AArch64ISD::NEON_ST2_UPD:
1142    return "AArch64ISD::NEON_ST2_UPD";
1143  case AArch64ISD::NEON_ST3_UPD:
1144    return "AArch64ISD::NEON_ST3_UPD";
1145  case AArch64ISD::NEON_ST4_UPD:
1146    return "AArch64ISD::NEON_ST4_UPD";
1147  case AArch64ISD::NEON_LD1x2_UPD:
1148    return "AArch64ISD::NEON_LD1x2_UPD";
1149  case AArch64ISD::NEON_LD1x3_UPD:
1150    return "AArch64ISD::NEON_LD1x3_UPD";
1151  case AArch64ISD::NEON_LD1x4_UPD:
1152    return "AArch64ISD::NEON_LD1x4_UPD";
1153  case AArch64ISD::NEON_ST1x2_UPD:
1154    return "AArch64ISD::NEON_ST1x2_UPD";
1155  case AArch64ISD::NEON_ST1x3_UPD:
1156    return "AArch64ISD::NEON_ST1x3_UPD";
1157  case AArch64ISD::NEON_ST1x4_UPD:
1158    return "AArch64ISD::NEON_ST1x4_UPD";
1159  case AArch64ISD::NEON_LD2DUP:
1160    return "AArch64ISD::NEON_LD2DUP";
1161  case AArch64ISD::NEON_LD3DUP:
1162    return "AArch64ISD::NEON_LD3DUP";
1163  case AArch64ISD::NEON_LD4DUP:
1164    return "AArch64ISD::NEON_LD4DUP";
1165  case AArch64ISD::NEON_LD2DUP_UPD:
1166    return "AArch64ISD::NEON_LD2DUP_UPD";
1167  case AArch64ISD::NEON_LD3DUP_UPD:
1168    return "AArch64ISD::NEON_LD3DUP_UPD";
1169  case AArch64ISD::NEON_LD4DUP_UPD:
1170    return "AArch64ISD::NEON_LD4DUP_UPD";
1171  case AArch64ISD::NEON_LD2LN_UPD:
1172    return "AArch64ISD::NEON_LD2LN_UPD";
1173  case AArch64ISD::NEON_LD3LN_UPD:
1174    return "AArch64ISD::NEON_LD3LN_UPD";
1175  case AArch64ISD::NEON_LD4LN_UPD:
1176    return "AArch64ISD::NEON_LD4LN_UPD";
1177  case AArch64ISD::NEON_ST2LN_UPD:
1178    return "AArch64ISD::NEON_ST2LN_UPD";
1179  case AArch64ISD::NEON_ST3LN_UPD:
1180    return "AArch64ISD::NEON_ST3LN_UPD";
1181  case AArch64ISD::NEON_ST4LN_UPD:
1182    return "AArch64ISD::NEON_ST4LN_UPD";
1183  case AArch64ISD::NEON_VEXTRACT:
1184    return "AArch64ISD::NEON_VEXTRACT";
1185  default:
1186    return NULL;
1187  }
1188}
1189
1190static const uint16_t AArch64FPRArgRegs[] = {
1191  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
1192  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
1193};
1194static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);
1195
1196static const uint16_t AArch64ArgRegs[] = {
1197  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
1198  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
1199};
1200static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);
1201
1202static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
1203                                 CCValAssign::LocInfo LocInfo,
1204                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
1205  // Mark all remaining general purpose registers as allocated. We don't
1206  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
1207  // i64 will go in registers (C.11).
1208  for (unsigned i = 0; i < NumArgRegs; ++i)
1209    State.AllocateReg(AArch64ArgRegs[i]);
1210
1211  return false;
1212}
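// For example, once an i128 argument has been assigned to the stack, this
// hook runs and marks X0-X7 as allocated, so every subsequent integer
// argument is also placed on the stack as rule C.11 requires.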
1213
1214#include "AArch64GenCallingConv.inc"
1215
1216CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
1217
1218  switch(CC) {
1219  default: llvm_unreachable("Unsupported calling convention");
1220  case CallingConv::Fast:
1221  case CallingConv::C:
1222    return CC_A64_APCS;
1223  }
1224}
1225
1226void
1227AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
1228                                           SDLoc DL, SDValue &Chain) const {
1229  MachineFunction &MF = DAG.getMachineFunction();
1230  MachineFrameInfo *MFI = MF.getFrameInfo();
1231  AArch64MachineFunctionInfo *FuncInfo
1232    = MF.getInfo<AArch64MachineFunctionInfo>();
1233
1234  SmallVector<SDValue, 8> MemOps;
1235
1236  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
1237                                                         NumArgRegs);
1238  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
1239                                                         NumFPRArgRegs);
1240
1241  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
1242  int GPRIdx = 0;
1243  if (GPRSaveSize != 0) {
1244    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);
1245
1246    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());
1247
1248    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
1249      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
1250      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
1251      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
1252                                   MachinePointerInfo::getStack(i * 8),
1253                                   false, false, 0);
1254      MemOps.push_back(Store);
1255      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
1256                        DAG.getConstant(8, getPointerTy()));
1257    }
1258  }
1259
1260  if (getSubtarget()->hasFPARMv8()) {
1261    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
1262    int FPRIdx = 0;
1263    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
1264    // can omit a register save area if we know we'll never use registers of
1265    // that class.
1266    if (FPRSaveSize != 0) {
1267      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
1268
1269      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
1270
1271      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
1272        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
1273            &AArch64::FPR128RegClass);
1274        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
1275        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
1276            MachinePointerInfo::getStack(i * 16),
1277            false, false, 0);
1278        MemOps.push_back(Store);
1279        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
1280            DAG.getConstant(16, getPointerTy()));
1281      }
1282    }
1283    FuncInfo->setVariadicFPRIdx(FPRIdx);
1284    FuncInfo->setVariadicFPRSize(FPRSaveSize);
1285  }
1286
1287  unsigned StackOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), 8);
1288  int StackIdx = MFI->CreateFixedObject(8, StackOffset, true);
1289
1290  FuncInfo->setVariadicStackIdx(StackIdx);
1291  FuncInfo->setVariadicGPRIdx(GPRIdx);
1292  FuncInfo->setVariadicGPRSize(GPRSaveSize);
1293
1294  if (!MemOps.empty()) {
1295    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
1296                        MemOps.size());
1297  }
1298}
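// A rough worked example (illustrative only): for a variadic callee such as
//   int f(const char *fmt, ...)
// the one fixed argument occupies x0, so FirstVariadicGPR is 1 and the GPR
// save area is 7 * 8 = 56 bytes (x1-x7); FirstVariadicFPR is 0, giving a
// 128-byte FPR save area (q0-q7) when FP/SIMD registers are available.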
1299
1300
1301SDValue
1302AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
1303                                      CallingConv::ID CallConv, bool isVarArg,
1304                                      const SmallVectorImpl<ISD::InputArg> &Ins,
1305                                      SDLoc dl, SelectionDAG &DAG,
1306                                      SmallVectorImpl<SDValue> &InVals) const {
1307  MachineFunction &MF = DAG.getMachineFunction();
1308  AArch64MachineFunctionInfo *FuncInfo
1309    = MF.getInfo<AArch64MachineFunctionInfo>();
1310  MachineFrameInfo *MFI = MF.getFrameInfo();
1311  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
1312
1313  SmallVector<CCValAssign, 16> ArgLocs;
1314  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1315                 getTargetMachine(), ArgLocs, *DAG.getContext());
1316  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
1317
1318  SmallVector<SDValue, 16> ArgValues;
1319
1320  SDValue ArgValue;
1321  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1322    CCValAssign &VA = ArgLocs[i];
1323    ISD::ArgFlagsTy Flags = Ins[i].Flags;
1324
1325    if (Flags.isByVal()) {
1326      // Byval is used for small structs and HFAs in the PCS, but the system
1327      // should work in a non-compliant manner for larger structs.
1328      EVT PtrTy = getPointerTy();
1329      int Size = Flags.getByValSize();
1330      unsigned NumRegs = (Size + 7) / 8;
1331
1332      uint32_t BEAlign = 0;
1333      if (Size < 8 && !getSubtarget()->isLittle())
1334        BEAlign = 8 - Size;
1335      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
1336                                                 VA.getLocMemOffset() + BEAlign,
1337                                                 false);
1338      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
1339      InVals.push_back(FrameIdxN);
1340
1341      continue;
1342    } else if (VA.isRegLoc()) {
1343      MVT RegVT = VA.getLocVT();
1344      const TargetRegisterClass *RC = getRegClassFor(RegVT);
1345      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1346
1347      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1348    } else { // VA.isMemLoc()
1349      assert(VA.isMemLoc());
1350
1351      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
1352                                      VA.getLocMemOffset(), true);
1353
1354      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1355      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1356                             MachinePointerInfo::getFixedStack(FI),
1357                             false, false, false, 0);
1358
1359
1360    }
1361
1362    switch (VA.getLocInfo()) {
1363    default: llvm_unreachable("Unknown loc info!");
1364    case CCValAssign::Full: break;
1365    case CCValAssign::BCvt:
1366      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1367      break;
1368    case CCValAssign::SExt:
1369    case CCValAssign::ZExt:
1370    case CCValAssign::AExt:
1371    case CCValAssign::FPExt: {
1372      unsigned DestSize = VA.getValVT().getSizeInBits();
1373      unsigned DestSubReg;
1374
1375      switch (DestSize) {
1376      case 8: DestSubReg = AArch64::sub_8; break;
1377      case 16: DestSubReg = AArch64::sub_16; break;
1378      case 32: DestSubReg = AArch64::sub_32; break;
1379      case 64: DestSubReg = AArch64::sub_64; break;
1380      default: llvm_unreachable("Unexpected argument promotion");
1381      }
1382
1383      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
1384                                   VA.getValVT(), ArgValue,
1385                                   DAG.getTargetConstant(DestSubReg, MVT::i32)),
1386                         0);
1387      break;
1388    }
1389    }
1390
1391    InVals.push_back(ArgValue);
1392  }
1393
1394  if (isVarArg)
1395    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);
1396
1397  unsigned StackArgSize = CCInfo.getNextStackOffset();
1398  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
1399    // This is a non-standard ABI so by fiat I say we're allowed to make full
1400    // use of the stack area to be popped, which must be aligned to 16 bytes in
1401    // any case:
1402    StackArgSize = RoundUpToAlignment(StackArgSize, 16);
1403
1404    // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
1405    // a multiple of 16.
1406    FuncInfo->setArgumentStackToRestore(StackArgSize);
1407
1408    // This realignment carries over to the available bytes below. Our own
1409    // callers will guarantee the space is free by giving an aligned value to
1410    // CALLSEQ_START.
1411  }
1412  // Even if we're not expected to free up the space, it's useful to know how
1413  // much is there while considering tail calls (because we can reuse it).
1414  FuncInfo->setBytesInStackArgArea(StackArgSize);
1415
1416  return Chain;
1417}
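// Illustrative note (informal): for "void g(signed char c)" the PCS delivers
// the i8 argument in the low bits of w0 with the upper bits unspecified, so
// the SExt/ZExt/AExt handling above simply extracts sub_8 from the incoming
// 32-bit value instead of emitting an explicit extension.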
1418
1419SDValue
1420AArch64TargetLowering::LowerReturn(SDValue Chain,
1421                                   CallingConv::ID CallConv, bool isVarArg,
1422                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
1423                                   const SmallVectorImpl<SDValue> &OutVals,
1424                                   SDLoc dl, SelectionDAG &DAG) const {
1425  // CCValAssign - represent the assignment of the return value to a location.
1426  SmallVector<CCValAssign, 16> RVLocs;
1427
1428  // CCState - Info about the registers and stack slots.
1429  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1430                 getTargetMachine(), RVLocs, *DAG.getContext());
1431
1432  // Analyze outgoing return values.
1433  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));
1434
1435  SDValue Flag;
1436  SmallVector<SDValue, 4> RetOps(1, Chain);
1437
1438  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1439    // PCS: "If the type, T, of the result of a function is such that
1440    // void func(T arg) would require that arg be passed as a value in a
1441    // register (or set of registers) according to the rules in 5.4, then the
1442    // result is returned in the same registers as would be used for such an
1443    // argument.
1444    //
1445    // Otherwise, the caller shall reserve a block of memory of sufficient
1446    // size and alignment to hold the result. The address of the memory block
1447    // shall be passed as an additional argument to the function in x8."
1448    //
1449    // This is implemented in two places. The register-return values are dealt
1450    // with here, more complex returns are passed as an sret parameter, which
1451    // means we don't have to worry about it during actual return.
1452    CCValAssign &VA = RVLocs[i];
1453    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");
1454
1455
1456    SDValue Arg = OutVals[i];
1457
1458    // There's no convenient note in the ABI about this as there is for normal
1459    // arguments, but it says return values are passed in the same registers as
1460    // an argument would be. I believe that includes the comments about
1461    // unspecified higher bits, putting the burden of widening on the *caller*
1462    // for return values.
1463    switch (VA.getLocInfo()) {
1464    default: llvm_unreachable("Unknown loc info");
1465    case CCValAssign::Full: break;
1466    case CCValAssign::SExt:
1467    case CCValAssign::ZExt:
1468    case CCValAssign::AExt:
1469      // Floating-point values should only be extended when they're going into
1470      // memory, which can't happen here so an integer extend is acceptable.
1471      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1472      break;
1473    case CCValAssign::BCvt:
1474      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1475      break;
1476    }
1477
1478    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1479    Flag = Chain.getValue(1);
1480    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1481  }
1482
1483  RetOps[0] = Chain;  // Update chain.
1484
1485  // Add the flag if we have it.
1486  if (Flag.getNode())
1487    RetOps.push_back(Flag);
1488
1489  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
1490                     &RetOps[0], RetOps.size());
1491}
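// Illustrative example (informal): "int h(void)" is returned in w0 through the
// loop above, whereas a function returning, say, a 32-byte struct receives an
// sret pointer in x8 from the PCS machinery, so LowerReturn itself never sees
// a memory-located return value (hence the isRegLoc assertion).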
1492
1493unsigned AArch64TargetLowering::getByValTypeAlignment(Type *Ty) const {
1494  // This is a new backend. For anything more precise than this, a front-end
1495  // should set an explicit alignment.
1496  return 4;
1497}
1498
1499SDValue
1500AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
1501                                 SmallVectorImpl<SDValue> &InVals) const {
1502  SelectionDAG &DAG                     = CLI.DAG;
1503  SDLoc &dl                             = CLI.DL;
1504  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1505  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
1506  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
1507  SDValue Chain                         = CLI.Chain;
1508  SDValue Callee                        = CLI.Callee;
1509  bool &IsTailCall                      = CLI.IsTailCall;
1510  CallingConv::ID CallConv              = CLI.CallConv;
1511  bool IsVarArg                         = CLI.IsVarArg;
1512
1513  MachineFunction &MF = DAG.getMachineFunction();
1514  AArch64MachineFunctionInfo *FuncInfo
1515    = MF.getInfo<AArch64MachineFunctionInfo>();
1516  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
1517  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
1518  bool IsSibCall = false;
1519
1520  if (IsTailCall) {
1521    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1522                    IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
1523                                                   Outs, OutVals, Ins, DAG);
1524
1525    // A sibling call is one where we're under the usual C ABI and not planning
1526    // to change that but can still do a tail call:
1527    if (!TailCallOpt && IsTailCall)
1528      IsSibCall = true;
1529  }
1530
1531  SmallVector<CCValAssign, 16> ArgLocs;
1532  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1533                 getTargetMachine(), ArgLocs, *DAG.getContext());
1534  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
1535
1536  // On AArch64 (and all other architectures I'm aware of) the most this has to
1537  // do is adjust the stack pointer.
1538  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
1539  if (IsSibCall) {
1540    // Since we're not changing the ABI to make this a tail call, the memory
1541    // operands are already available in the caller's incoming argument space.
1542    NumBytes = 0;
1543  }
1544
1545  // FPDiff is the byte offset of the call's argument area from the callee's.
1546  // Stores to callee stack arguments will be placed in FixedStackSlots offset
1547  // by this amount for a tail call. In a sibling call it must be 0 because the
1548  // caller will deallocate the entire stack and the callee still expects its
1549  // arguments to begin at SP+0. Completely unused for non-tail calls.
1550  int FPDiff = 0;
1551
1552  if (IsTailCall && !IsSibCall) {
1553    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1554
1555    // FPDiff will be negative if this tail call requires more space than we
1556    // would automatically have in our incoming argument space. Positive if we
1557    // can actually shrink the stack.
1558    FPDiff = NumReusableBytes - NumBytes;
1559
1560    // The stack pointer must be 16-byte aligned at all times it's used for a
1561    // memory operation, which in practice means at *all* times and in
1562    // particular across call boundaries. Therefore our own arguments started at
1563    // a 16-byte aligned SP and the delta applied for the tail call should
1564    // satisfy the same constraint.
1565    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
1566  }
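  // Rough worked example (illustrative): if our incoming argument area is 32
  // bytes but this tail call needs only 16 bytes of stack arguments, FPDiff is
  // +16, so the callee's arguments are written 16 bytes above the base of our
  // own incoming area and the stack can shrink by that amount at the jump.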
1567
1568  if (!IsSibCall)
1569    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
1570                                 dl);
1571
1572  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
1573                                        getPointerTy());
1574
1575  SmallVector<SDValue, 8> MemOpChains;
1576  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1577
1578  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1579    CCValAssign &VA = ArgLocs[i];
1580    ISD::ArgFlagsTy Flags = Outs[i].Flags;
1581    SDValue Arg = OutVals[i];
1582
1583    // Callee does the actual widening, so all extensions just use an implicit
1584    // definition of the rest of the Loc. Aesthetically, this would be nicer as
1585    // an ANY_EXTEND, but that isn't valid for floating-point types and this
1586    // alternative works on integer types too.
1587    switch (VA.getLocInfo()) {
1588    default: llvm_unreachable("Unknown loc info!");
1589    case CCValAssign::Full: break;
1590    case CCValAssign::SExt:
1591    case CCValAssign::ZExt:
1592    case CCValAssign::AExt:
1593    case CCValAssign::FPExt: {
1594      unsigned SrcSize = VA.getValVT().getSizeInBits();
1595      unsigned SrcSubReg;
1596
1597      switch (SrcSize) {
1598      case 8: SrcSubReg = AArch64::sub_8; break;
1599      case 16: SrcSubReg = AArch64::sub_16; break;
1600      case 32: SrcSubReg = AArch64::sub_32; break;
1601      case 64: SrcSubReg = AArch64::sub_64; break;
1602      default: llvm_unreachable("Unexpected argument promotion");
1603      }
1604
1605      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
1606                                    VA.getLocVT(),
1607                                    DAG.getUNDEF(VA.getLocVT()),
1608                                    Arg,
1609                                    DAG.getTargetConstant(SrcSubReg, MVT::i32)),
1610                    0);
1611
1612      break;
1613    }
1614    case CCValAssign::BCvt:
1615      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1616      break;
1617    }
1618
1619    if (VA.isRegLoc()) {
1620      // A normal register (sub-) argument. For now we just note it down because
1621      // we want to copy things into registers as late as possible to avoid
1622      // register-pressure (and possibly worse).
1623      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1624      continue;
1625    }
1626
1627    assert(VA.isMemLoc() && "unexpected argument location");
1628
1629    SDValue DstAddr;
1630    MachinePointerInfo DstInfo;
1631    if (IsTailCall) {
1632      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
1633                                          VA.getLocVT().getSizeInBits();
1634      OpSize = (OpSize + 7) / 8;
1635      int32_t Offset = VA.getLocMemOffset() + FPDiff;
1636      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
1637
1638      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
1639      DstInfo = MachinePointerInfo::getFixedStack(FI);
1640
1641      // Make sure any stack arguments overlapping with where we're storing are
1642      // loaded before this eventual operation. Otherwise they'll be clobbered.
1643      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
1644    } else {
1645      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize()*8 :
1646                                          VA.getLocVT().getSizeInBits();
1647      OpSize = (OpSize + 7) / 8;
1648      uint32_t BEAlign = 0;
1649      if (OpSize < 8 && !getSubtarget()->isLittle())
1650        BEAlign = 8 - OpSize;
1651      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + BEAlign);
1652
1653      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
1654      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
1655    }
1656
1657    if (Flags.isByVal()) {
1658      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
1659      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
1660                                  Flags.getByValAlign(),
1661                                  /*isVolatile = */ false,
1662                                  /*alwaysInline = */ false,
1663                                  DstInfo, MachinePointerInfo(0));
1664      MemOpChains.push_back(Cpy);
1665    } else {
1666      // Normal stack argument, put it where it's needed.
1667      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
1668                                   false, false, 0);
1669      MemOpChains.push_back(Store);
1670    }
1671  }
1672
1673  // The loads and stores generated above shouldn't clash with each
1674  // other. Combining them with this TokenFactor notes that fact for the rest of
1675  // the backend.
1676  if (!MemOpChains.empty())
1677    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1678                        &MemOpChains[0], MemOpChains.size());
1679
1680  // Most of the rest of the instructions need to be glued together; we don't
1681  // want assignments to actual registers used by a call to be rearranged by a
1682  // well-meaning scheduler.
1683  SDValue InFlag;
1684
1685  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1686    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1687                             RegsToPass[i].second, InFlag);
1688    InFlag = Chain.getValue(1);
1689  }
1690
1691  // The linker is responsible for inserting veneers when necessary to put a
1692  // function call destination in range, so we don't need to bother with a
1693  // wrapper here.
1694  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1695    const GlobalValue *GV = G->getGlobal();
1696    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
1697  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1698    const char *Sym = S->getSymbol();
1699    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
1700  }
1701
1702  // We don't usually want to end the call-sequence here because we would tidy
1703  // the frame up *after* the call. However, in the ABI-changing tail-call case
1704  // we've carefully laid out the parameters so that when sp is reset they'll be
1705  // in the correct location.
1706  if (IsTailCall && !IsSibCall) {
1707    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1708                               DAG.getIntPtrConstant(0, true), InFlag, dl);
1709    InFlag = Chain.getValue(1);
1710  }
1711
1712  // We produce the following DAG scheme for the actual call instruction:
1713  //     (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
1714  //
1715  // Most operands aren't going to be used directly; they just keep the values
1716  // live as far as LLVM is concerned. It's expected to be selected as simply
1717  // "bl callee" (for a direct, non-tail call).
1718  std::vector<SDValue> Ops;
1719  Ops.push_back(Chain);
1720  Ops.push_back(Callee);
1721
1722  if (IsTailCall) {
1723    // Each tail call may have to adjust the stack by a different amount, so
1724    // this information must travel along with the operation for eventual
1725    // consumption by emitEpilogue.
1726    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
1727  }
1728
1729  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1730    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1731                                  RegsToPass[i].second.getValueType()));
1732
1733
1734  // Add a register mask operand representing the call-preserved registers. This
1735  // is used later in codegen to constrain register-allocation.
1736  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1737  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
1738  assert(Mask && "Missing call preserved mask for calling convention");
1739  Ops.push_back(DAG.getRegisterMask(Mask));
1740
1741  // If we needed glue, put it in as the last argument.
1742  if (InFlag.getNode())
1743    Ops.push_back(InFlag);
1744
1745  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1746
1747  if (IsTailCall) {
1748    return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
1749  }
1750
1751  Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
1752  InFlag = Chain.getValue(1);
1753
1754  // Now we can reclaim the stack; we may as well do it before working out
1755  // where our return value is.
1756  if (!IsSibCall) {
1757    uint64_t CalleePopBytes
1758      = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;
1759
1760    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1761                               DAG.getIntPtrConstant(CalleePopBytes, true),
1762                               InFlag, dl);
1763    InFlag = Chain.getValue(1);
1764  }
1765
1766  return LowerCallResult(Chain, InFlag, CallConv,
1767                         IsVarArg, Ins, dl, DAG, InVals);
1768}
1769
1770SDValue
1771AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1772                                      CallingConv::ID CallConv, bool IsVarArg,
1773                                      const SmallVectorImpl<ISD::InputArg> &Ins,
1774                                      SDLoc dl, SelectionDAG &DAG,
1775                                      SmallVectorImpl<SDValue> &InVals) const {
1776  // Assign locations to each value returned by this call.
1777  SmallVector<CCValAssign, 16> RVLocs;
1778  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1779                 getTargetMachine(), RVLocs, *DAG.getContext());
1780  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));
1781
1782  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1783    CCValAssign VA = RVLocs[i];
1784
1785    // Return values that are too big to fit into registers should use an sret
1786    // pointer, so this can be a lot simpler than the main argument code.
1787    assert(VA.isRegLoc() && "Memory locations not expected for call return");
1788
1789    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1790                                     InFlag);
1791    Chain = Val.getValue(1);
1792    InFlag = Val.getValue(2);
1793
1794    switch (VA.getLocInfo()) {
1795    default: llvm_unreachable("Unknown loc info!");
1796    case CCValAssign::Full: break;
1797    case CCValAssign::BCvt:
1798      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1799      break;
1800    case CCValAssign::ZExt:
1801    case CCValAssign::SExt:
1802    case CCValAssign::AExt:
1803      // Floating-point arguments only get extended/truncated if they're going
1804      // in memory, so using the integer operation is acceptable here.
1805      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
1806      break;
1807    }
1808
1809    InVals.push_back(Val);
1810  }
1811
1812  return Chain;
1813}
1814
1815bool
1816AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1817                                    CallingConv::ID CalleeCC,
1818                                    bool IsVarArg,
1819                                    bool IsCalleeStructRet,
1820                                    bool IsCallerStructRet,
1821                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
1822                                    const SmallVectorImpl<SDValue> &OutVals,
1823                                    const SmallVectorImpl<ISD::InputArg> &Ins,
1824                                    SelectionDAG& DAG) const {
1825
1826  // For CallingConv::C this function knows whether the ABI needs
1827  // changing. That's not true for other conventions so they will have to opt in
1828  // manually.
1829  if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1830    return false;
1831
1832  const MachineFunction &MF = DAG.getMachineFunction();
1833  const Function *CallerF = MF.getFunction();
1834  CallingConv::ID CallerCC = CallerF->getCallingConv();
1835  bool CCMatch = CallerCC == CalleeCC;
1836
1837  // Byval parameters hand the function a pointer directly into the stack area
1838  // we want to reuse during a tail call. Working around this *is* possible (see
1839  // X86) but less efficient and uglier in LowerCall.
1840  for (Function::const_arg_iterator i = CallerF->arg_begin(),
1841         e = CallerF->arg_end(); i != e; ++i)
1842    if (i->hasByValAttr())
1843      return false;
1844
1845  if (getTargetMachine().Options.GuaranteedTailCallOpt) {
1846    if (IsTailCallConvention(CalleeCC) && CCMatch)
1847      return true;
1848    return false;
1849  }
1850
1851  // Now we search for cases where we can use a tail call without changing the
1852  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
1853  // concept.
1854
1855  // I want anyone implementing a new calling convention to think long and hard
1856  // about this assert.
1857  assert((!IsVarArg || CalleeCC == CallingConv::C)
1858         && "Unexpected variadic calling convention");
1859
1860  if (IsVarArg && !Outs.empty()) {
1861    // At least two cases here: if caller is fastcc then we can't have any
1862    // memory arguments (we'd be expected to clean up the stack afterwards). If
1863    // caller is C then we could potentially use its argument area.
1864
1865    // FIXME: for now we take the most conservative of these in both cases:
1866    // disallow all variadic memory operands.
1867    SmallVector<CCValAssign, 16> ArgLocs;
1868    CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1869                   getTargetMachine(), ArgLocs, *DAG.getContext());
1870
1871    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1872    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1873      if (!ArgLocs[i].isRegLoc())
1874        return false;
1875  }
1876
1877  // If the calling conventions do not match, then we'd better make sure the
1878  // results are returned in the same way as what the caller expects.
1879  if (!CCMatch) {
1880    SmallVector<CCValAssign, 16> RVLocs1;
1881    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
1882                    getTargetMachine(), RVLocs1, *DAG.getContext());
1883    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));
1884
1885    SmallVector<CCValAssign, 16> RVLocs2;
1886    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
1887                    getTargetMachine(), RVLocs2, *DAG.getContext());
1888    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));
1889
1890    if (RVLocs1.size() != RVLocs2.size())
1891      return false;
1892    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1893      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1894        return false;
1895      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1896        return false;
1897      if (RVLocs1[i].isRegLoc()) {
1898        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1899          return false;
1900      } else {
1901        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1902          return false;
1903      }
1904    }
1905  }
1906
1907  // Nothing more to check if the callee is taking no arguments.
1908  if (Outs.empty())
1909    return true;
1910
1911  SmallVector<CCValAssign, 16> ArgLocs;
1912  CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1913                 getTargetMachine(), ArgLocs, *DAG.getContext());
1914
1915  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1916
1917  const AArch64MachineFunctionInfo *FuncInfo
1918    = MF.getInfo<AArch64MachineFunctionInfo>();
1919
1920  // If the stack arguments for this call would fit into our own save area then
1921  // the call can be made tail.
1922  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
1923}
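// Illustrative example (informal): a C-convention function with 16 bytes of
// incoming stack arguments may tail-call "void g(long, long, long)", whose
// arguments all fit in x0-x2 (getNextStackOffset() == 0), but not a callee
// needing 32 bytes of stack arguments, since that exceeds the reusable area.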
1924
1925bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
1926                                                   bool TailCallOpt) const {
1927  return CallCC == CallingConv::Fast && TailCallOpt;
1928}
1929
1930bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
1931  return CallCC == CallingConv::Fast;
1932}
1933
1934SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
1935                                                   SelectionDAG &DAG,
1936                                                   MachineFrameInfo *MFI,
1937                                                   int ClobberedFI) const {
1938  SmallVector<SDValue, 8> ArgChains;
1939  int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
1940  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;
1941
1942  // Include the original chain at the beginning of the list. When this is
1943  // used by target LowerCall hooks, this helps the legalizer find the
1944  // CALLSEQ_START node.
1945  ArgChains.push_back(Chain);
1946
1947  // Add a chain for each stack argument that overlaps the clobbered area
1948  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1949         UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
1950    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
1951      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
1952        if (FI->getIndex() < 0) {
1953          int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
1954          int64_t InLastByte = InFirstByte;
1955          InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
1956
1957          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1958              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1959            ArgChains.push_back(SDValue(L, 1));
1960        }
1961
1962  // Build a tokenfactor for all the chains.
1963  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
1964                     &ArgChains[0], ArgChains.size());
1965}
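// Illustrative example (informal): if the clobbered fixed object covers bytes
// [16, 23] of the incoming argument area and some earlier load reads bytes
// [20, 27], the ranges overlap, so that load's chain joins the TokenFactor and
// the load is ordered before the store that would clobber it.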
1966
1967static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
1968  switch (CC) {
1969  case ISD::SETEQ:  return A64CC::EQ;
1970  case ISD::SETGT:  return A64CC::GT;
1971  case ISD::SETGE:  return A64CC::GE;
1972  case ISD::SETLT:  return A64CC::LT;
1973  case ISD::SETLE:  return A64CC::LE;
1974  case ISD::SETNE:  return A64CC::NE;
1975  case ISD::SETUGT: return A64CC::HI;
1976  case ISD::SETUGE: return A64CC::HS;
1977  case ISD::SETULT: return A64CC::LO;
1978  case ISD::SETULE: return A64CC::LS;
1979  default: llvm_unreachable("Unexpected condition code");
1980  }
1981}
1982
1983bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
1984  // icmp is implemented using adds/subs immediate, which take an unsigned
1985  // 12-bit immediate, optionally shifted left by 12 bits.
1986
1987  // Symmetric by using adds/subs
1988  if (Val < 0)
1989    Val = -Val;
1990
1991  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
1992}
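// Worked examples (informal): 4095 (0xfff) and 0xabc000 are legal immediates,
// 0x1001 is not because it needs set bits both below and above bit 12, and
// -4095 is accepted via the symmetry above since the comparison can be done
// with adds rather than subs.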
1993
1994SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
1995                                        ISD::CondCode CC, SDValue &A64cc,
1996                                        SelectionDAG &DAG, SDLoc &dl) const {
1997  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
1998    int64_t C = 0;
1999    EVT VT = RHSC->getValueType(0);
2000    bool knownInvalid = false;
2001
2002    // I'm not convinced the rest of LLVM handles these edge cases properly, but
2003    // we can at least get it right.
2004    if (isSignedIntSetCC(CC)) {
2005      C = RHSC->getSExtValue();
2006    } else if (RHSC->getZExtValue() > INT64_MAX) {
2007      // A 64-bit constant not representable by a signed 64-bit integer is far
2008      // too big to fit into a SUBS immediate anyway.
2009      knownInvalid = true;
2010    } else {
2011      C = RHSC->getZExtValue();
2012    }
2013
2014    if (!knownInvalid && !isLegalICmpImmediate(C)) {
2015      // Constant does not fit, try adjusting it by one?
2016      switch (CC) {
2017      default: break;
2018      case ISD::SETLT:
2019      case ISD::SETGE:
2020        if (isLegalICmpImmediate(C-1)) {
2021          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2022          RHS = DAG.getConstant(C-1, VT);
2023        }
2024        break;
2025      case ISD::SETULT:
2026      case ISD::SETUGE:
2027        if (isLegalICmpImmediate(C-1)) {
2028          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2029          RHS = DAG.getConstant(C-1, VT);
2030        }
2031        break;
2032      case ISD::SETLE:
2033      case ISD::SETGT:
2034        if (isLegalICmpImmediate(C+1)) {
2035          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2036          RHS = DAG.getConstant(C+1, VT);
2037        }
2038        break;
2039      case ISD::SETULE:
2040      case ISD::SETUGT:
2041        if (isLegalICmpImmediate(C+1)) {
2042          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2043          RHS = DAG.getConstant(C+1, VT);
2044        }
2045        break;
2046      }
2047    }
2048  }
2049
2050  A64CC::CondCodes CondCode = IntCCToA64CC(CC);
2051  A64cc = DAG.getConstant(CondCode, MVT::i32);
2052  return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2053                     DAG.getCondCode(CC));
2054}
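// Illustrative example (informal): "x < 0x1001" uses an illegal immediate, but
// the adjustment above turns it into "x <= 0x1000", which fits the shifted
// 12-bit immediate form of the comparison.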
2055
2056static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
2057                                    A64CC::CondCodes &Alternative) {
2058  A64CC::CondCodes CondCode = A64CC::Invalid;
2059  Alternative = A64CC::Invalid;
2060
2061  switch (CC) {
2062  default: llvm_unreachable("Unknown FP condition!");
2063  case ISD::SETEQ:
2064  case ISD::SETOEQ: CondCode = A64CC::EQ; break;
2065  case ISD::SETGT:
2066  case ISD::SETOGT: CondCode = A64CC::GT; break;
2067  case ISD::SETGE:
2068  case ISD::SETOGE: CondCode = A64CC::GE; break;
2069  case ISD::SETOLT: CondCode = A64CC::MI; break;
2070  case ISD::SETOLE: CondCode = A64CC::LS; break;
2071  case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
2072  case ISD::SETO:   CondCode = A64CC::VC; break;
2073  case ISD::SETUO:  CondCode = A64CC::VS; break;
2074  case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
2075  case ISD::SETUGT: CondCode = A64CC::HI; break;
2076  case ISD::SETUGE: CondCode = A64CC::PL; break;
2077  case ISD::SETLT:
2078  case ISD::SETULT: CondCode = A64CC::LT; break;
2079  case ISD::SETLE:
2080  case ISD::SETULE: CondCode = A64CC::LE; break;
2081  case ISD::SETNE:
2082  case ISD::SETUNE: CondCode = A64CC::NE; break;
2083  }
2084  return CondCode;
2085}
2086
2087SDValue
2088AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
2089  SDLoc DL(Op);
2090  EVT PtrVT = getPointerTy();
2091  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2092
2093  switch(getTargetMachine().getCodeModel()) {
2094  case CodeModel::Small:
2095    // The most efficient code is PC-relative anyway for the small memory model,
2096    // so we don't need to worry about the relocation model.
2097    return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2098                       DAG.getTargetBlockAddress(BA, PtrVT, 0,
2099                                                 AArch64II::MO_NO_FLAG),
2100                       DAG.getTargetBlockAddress(BA, PtrVT, 0,
2101                                                 AArch64II::MO_LO12),
2102                       DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
2103  case CodeModel::Large:
2104    return DAG.getNode(
2105      AArch64ISD::WrapperLarge, DL, PtrVT,
2106      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
2107      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
2108      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
2109      DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
2110  default:
2111    llvm_unreachable("Only small and large code models supported now");
2112  }
2113}
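// Rough sketch (illustrative) of what these wrappers select to for a block
// address "lbl": the small model emits "adrp x0, lbl" followed by
// "add x0, x0, #:lo12:lbl", while the large model materialises the full
// 64-bit address with a movz/movk chain using the :abs_g3: through
// :abs_g0_nc: relocations.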
2114
2115
2116// (BRCOND chain, val, dest)
2117SDValue
2118AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2119  SDLoc dl(Op);
2120  SDValue Chain = Op.getOperand(0);
2121  SDValue TheBit = Op.getOperand(1);
2122  SDValue DestBB = Op.getOperand(2);
2123
2124  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
2125  // that as the consumer we are responsible for ignoring rubbish in higher
2126  // bits.
2127  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2128                       DAG.getConstant(1, MVT::i32));
2129
2130  SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2131                               DAG.getConstant(0, TheBit.getValueType()),
2132                               DAG.getCondCode(ISD::SETNE));
2133
2134  return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
2135                     A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
2136                     DestBB);
2137}
2138
2139// (BR_CC chain, condcode, lhs, rhs, dest)
2140SDValue
2141AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2142  SDLoc dl(Op);
2143  SDValue Chain = Op.getOperand(0);
2144  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2145  SDValue LHS = Op.getOperand(2);
2146  SDValue RHS = Op.getOperand(3);
2147  SDValue DestBB = Op.getOperand(4);
2148
2149  if (LHS.getValueType() == MVT::f128) {
2150    // f128 comparisons are lowered to runtime calls by a routine which sets
2151    // LHS, RHS and CC appropriately for the rest of this function to continue.
2152    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2153
2154    // If softenSetCCOperands returned a scalar, we need to compare the result
2155    // against zero to select between true and false values.
2156    if (RHS.getNode() == 0) {
2157      RHS = DAG.getConstant(0, LHS.getValueType());
2158      CC = ISD::SETNE;
2159    }
2160  }
2161
2162  if (LHS.getValueType().isInteger()) {
2163    SDValue A64cc;
2164
2165    // Integers are handled in a separate function because the combinations of
2166    // immediates and tests can get hairy and we may want to fiddle things.
2167    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2168
2169    return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
2170                       Chain, CmpOp, A64cc, DestBB);
2171  }
2172
2173  // Note that some LLVM floating-point CondCodes can't be lowered to a single
2174  // conditional branch, hence FPCCToA64CC can set a second test; passing
2175  // either one is sufficient.
2176  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2177  CondCode = FPCCToA64CC(CC, Alternative);
2178  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2179  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2180                              DAG.getCondCode(CC));
2181  SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
2182                                 Chain, SetCC, A64cc, DestBB);
2183
2184  if (Alternative != A64CC::Invalid) {
2185    A64cc = DAG.getConstant(Alternative, MVT::i32);
2186    A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
2187                           A64BR_CC, SetCC, A64cc, DestBB);
2188
2189  }
2190
2191  return A64BR_CC;
2192}
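// Illustrative example (informal): an ordered-not-equal (SETONE) branch maps
// to CondCode MI with Alternative GT, so the code above emits one fcmp
// followed by "b.mi dest" and "b.gt dest"; taking either branch is correct,
// and unordered inputs fail both conditions as required.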
2193
2194SDValue
2195AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
2196                                       RTLIB::Libcall Call) const {
2197  ArgListTy Args;
2198  ArgListEntry Entry;
2199  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
2200    EVT ArgVT = Op.getOperand(i).getValueType();
2201    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2202    Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
2203    Entry.isSExt = false;
2204    Entry.isZExt = false;
2205    Args.push_back(Entry);
2206  }
2207  SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
2208
2209  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2210
2211  // By default, the input chain to this libcall is the entry node of the
2212  // function. If the libcall is going to be emitted as a tail call then
2213  // isUsedByReturnOnly will change it to the right chain if the return
2214  // node which is being folded has a non-entry input chain.
2215  SDValue InChain = DAG.getEntryNode();
2216
2217  // isTailCall may be true since the callee does not reference the caller's
2218  // stack frame. Check if it's in the right position.
2219  SDValue TCChain = InChain;
2220  bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
2221  if (isTailCall)
2222    InChain = TCChain;
2223
2224  TargetLowering::
2225  CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
2226                    0, getLibcallCallingConv(Call), isTailCall,
2227                    /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
2228                    Callee, Args, DAG, SDLoc(Op));
2229  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2230
2231  if (!CallInfo.second.getNode())
2232    // It's a tailcall, return the chain (which is the DAG root).
2233    return DAG.getRoot();
2234
2235  return CallInfo.first;
2236}
2237
2238SDValue
2239AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
2240  if (Op.getOperand(0).getValueType() != MVT::f128) {
2241    // It's legal except when f128 is involved
2242    return Op;
2243  }
2244
2245  RTLIB::Libcall LC;
2246  LC  = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
2247
2248  SDValue SrcVal = Op.getOperand(0);
2249  return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
2250                     /*isSigned*/ false, SDLoc(Op)).first;
2251}
2252
2253SDValue
2254AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
2255  assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
2256
2257  RTLIB::Libcall LC;
2258  LC  = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
2259
2260  return LowerF128ToCall(Op, DAG, LC);
2261}
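// Illustrative note (informal): these two paths typically end up as
// compiler-rt/libgcc calls such as __trunctfdf2 for an f128 -> f64 round and
// __extenddftf2 for an f64 -> f128 extend, chosen through the RTLIB tables.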
2262
2263static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2264                                    bool IsSigned) {
2265  SDLoc dl(Op);
2266  EVT VT = Op.getValueType();
2267  SDValue Vec = Op.getOperand(0);
2268  EVT OpVT = Vec.getValueType();
2269  unsigned Opc = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
2270
2271  if (VT.getVectorNumElements() == 1) {
2272    assert(OpVT == MVT::v1f64 && "Unexpected vector type!");
2273    if (VT.getSizeInBits() == OpVT.getSizeInBits())
2274      return Op;
2275    return DAG.UnrollVectorOp(Op.getNode());
2276  }
2277
2278  if (VT.getSizeInBits() > OpVT.getSizeInBits()) {
2279    assert(Vec.getValueType() == MVT::v2f32 && VT == MVT::v2i64 &&
2280           "Unexpected vector type!");
2281    Vec = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Vec);
2282    return DAG.getNode(Opc, dl, VT, Vec);
2283  } else if (VT.getSizeInBits() < OpVT.getSizeInBits()) {
2284    EVT CastVT = EVT::getIntegerVT(*DAG.getContext(),
2285                                   OpVT.getVectorElementType().getSizeInBits());
2286    CastVT =
2287        EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements());
2288    Vec = DAG.getNode(Opc, dl, CastVT, Vec);
2289    return DAG.getNode(ISD::TRUNCATE, dl, VT, Vec);
2290  }
2291  return DAG.getNode(Opc, dl, VT, Vec);
2292}
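// Illustrative examples (informal): fptoui <2 x float> to <2 x i64> is widened
// through an FP_EXTEND to <2 x double> first, while fptosi <2 x double> to
// <2 x i32> converts to <2 x i64> and then truncates, since neither
// mixed-width form maps onto a single NEON conversion.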
2293
2294static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
2295  // We custom lower concat_vectors with 4, 8, or 16 operands that are all the
2296  // same operand and of type v1* using the DUP instruction.
2297  unsigned NumOps = Op->getNumOperands();
2298  if (NumOps == 2) {
2299    assert(Op.getValueType().getSizeInBits() == 128 && "unexpected concat");
2300    return Op;
2301  }
2302
2303  if (NumOps != 4 && NumOps != 8 && NumOps != 16)
2304    return SDValue();
2305
2306  // Must be a single value for VDUP.
2307  SDValue Op0 = Op.getOperand(0);
2308  for (unsigned i = 1; i < NumOps; ++i) {
2309    SDValue OpN = Op.getOperand(i);
2310    if (Op0 != OpN)
2311      return SDValue();
2312  }
2313
2314  // Verify the value type.
2315  EVT EltVT = Op0.getValueType();
2316  switch (NumOps) {
2317  default: llvm_unreachable("Unexpected number of operands");
2318  case 4:
2319    if (EltVT != MVT::v1i16 && EltVT != MVT::v1i32)
2320      return SDValue();
2321    break;
2322  case 8:
2323    if (EltVT != MVT::v1i8 && EltVT != MVT::v1i16)
2324      return SDValue();
2325    break;
2326  case 16:
2327    if (EltVT != MVT::v1i8)
2328      return SDValue();
2329    break;
2330  }
2331
2332  SDLoc DL(Op);
2333  EVT VT = Op.getValueType();
2334  // VDUP produces better code for constants.
2335  if (Op0->getOpcode() == ISD::BUILD_VECTOR)
2336    return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Op0->getOperand(0));
2337  return DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, Op0,
2338                     DAG.getConstant(0, MVT::i64));
2339}
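// Illustrative example (informal): concatenating eight copies of the same
// v1i8 value into a v8i8 becomes a single NEON_VDUPLANE (or NEON_VDUP when
// the operand is a build_vector constant) rather than a chain of inserts.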
2340
2341SDValue
2342AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2343                                      bool IsSigned) const {
2344  if (Op.getValueType().isVector())
2345    return LowerVectorFP_TO_INT(Op, DAG, IsSigned);
2346  if (Op.getOperand(0).getValueType() != MVT::f128) {
2347    // It's legal except when f128 is involved
2348    return Op;
2349  }
2350
2351  RTLIB::Libcall LC;
2352  if (IsSigned)
2353    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
2354  else
2355    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
2356
2357  return LowerF128ToCall(Op, DAG, LC);
2358}
2359
2360SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
2361  MachineFunction &MF = DAG.getMachineFunction();
2362  MachineFrameInfo *MFI = MF.getFrameInfo();
2363  MFI->setReturnAddressIsTaken(true);
2364
2365  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2366    return SDValue();
2367
2368  EVT VT = Op.getValueType();
2369  SDLoc dl(Op);
2370  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2371  if (Depth) {
2372    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
2373    SDValue Offset = DAG.getConstant(8, MVT::i64);
2374    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
2375                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
2376                       MachinePointerInfo(), false, false, false, 0);
2377  }
2378
2379  // Return X30, which contains the return address. Mark it an implicit live-in.
2380  unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
2381  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
2382}
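// Illustrative example (informal): __builtin_return_address(0) becomes a copy
// from x30, while a depth of 1 follows x29 to the caller's frame record and
// loads the saved link register from offset 8 within it.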
2383
2384
2385SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG)
2386                                              const {
2387  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2388  MFI->setFrameAddressIsTaken(true);
2389
2390  EVT VT = Op.getValueType();
2391  SDLoc dl(Op);
2392  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2393  unsigned FrameReg = AArch64::X29;
2394  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2395  while (Depth--)
2396    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
2397                            MachinePointerInfo(),
2398                            false, false, false, 0);
2399  return FrameAddr;
2400}
2401
2402SDValue
2403AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
2404                                                  SelectionDAG &DAG) const {
2405  assert(getTargetMachine().getCodeModel() == CodeModel::Large);
2406  assert(getTargetMachine().getRelocationModel() == Reloc::Static);
2407
2408  EVT PtrVT = getPointerTy();
2409  SDLoc dl(Op);
2410  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
2411  const GlobalValue *GV = GN->getGlobal();
2412
2413  SDValue GlobalAddr = DAG.getNode(
2414      AArch64ISD::WrapperLarge, dl, PtrVT,
2415      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
2416      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
2417      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
2418      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
2419
2420  if (GN->getOffset() != 0)
2421    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
2422                       DAG.getConstant(GN->getOffset(), PtrVT));
2423
2424  return GlobalAddr;
2425}
2426
2427SDValue
2428AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
2429                                                  SelectionDAG &DAG) const {
2430  assert(getTargetMachine().getCodeModel() == CodeModel::Small);
2431
2432  EVT PtrVT = getPointerTy();
2433  SDLoc dl(Op);
2434  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
2435  const GlobalValue *GV = GN->getGlobal();
2436  unsigned Alignment = GV->getAlignment();
2437  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2438  if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
2439    // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
2440    // to zero when they remain undefined. In PIC mode the GOT can take care of
2441    // this, but in absolute mode we use a constant pool load.
2442    SDValue PoolAddr;
2443    PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2444                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
2445                                                     AArch64II::MO_NO_FLAG),
2446                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
2447                                                     AArch64II::MO_LO12),
2448                           DAG.getConstant(8, MVT::i32));
2449    SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
2450                                     MachinePointerInfo::getConstantPool(),
2451                                     /*isVolatile=*/ false,
2452                                     /*isNonTemporal=*/ true,
2453                                     /*isInvariant=*/ true, 8);
2454    if (GN->getOffset() != 0)
2455      return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
2456                         DAG.getConstant(GN->getOffset(), PtrVT));
2457
2458    return GlobalAddr;
2459  }
2460
2461  if (Alignment == 0) {
2462    const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
2463    if (GVPtrTy->getElementType()->isSized()) {
2464      Alignment
2465        = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
2466    } else {
2467      // Be conservative if we can't guess, not that it really matters:
2468      // functions and labels aren't valid for loads, and the methods used to
2469      // actually calculate an address work with any alignment.
2470      Alignment = 1;
2471    }
2472  }
2473
2474  unsigned char HiFixup, LoFixup;
2475  bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
2476
2477  if (UseGOT) {
2478    HiFixup = AArch64II::MO_GOT;
2479    LoFixup = AArch64II::MO_GOT_LO12;
2480    Alignment = 8;
2481  } else {
2482    HiFixup = AArch64II::MO_NO_FLAG;
2483    LoFixup = AArch64II::MO_LO12;
2484  }
2485
2486  // AArch64's small model demands the following sequence:
2487  // ADRP x0, somewhere
2488  // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
2489  SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2490                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2491                                                             HiFixup),
2492                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2493                                                             LoFixup),
2494                                  DAG.getConstant(Alignment, MVT::i32));
2495
2496  if (UseGOT) {
2497    GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
2498                            GlobalRef);
2499  }
2500
2501  if (GN->getOffset() != 0)
2502    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
2503                       DAG.getConstant(GN->getOffset(), PtrVT));
2504
2505  return GlobalRef;
2506}
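// Illustrative example (informal): a local, non-preemptible global "g" is
// addressed directly as "adrp x0, g" + "add x0, x0, #:lo12:g", whereas one
// that must be indirected goes through the GOT as "adrp x0, :got:g" +
// "ldr x0, [x0, #:got_lo12:g]".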
2507
2508SDValue
2509AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
2510                                             SelectionDAG &DAG) const {
2511  // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
2512  // we make those distinctions here.
2513
2514  switch (getTargetMachine().getCodeModel()) {
2515  case CodeModel::Small:
2516    return LowerGlobalAddressELFSmall(Op, DAG);
2517  case CodeModel::Large:
2518    return LowerGlobalAddressELFLarge(Op, DAG);
2519  default:
2520    llvm_unreachable("Only small and large code models supported now");
2521  }
2522}
2523
2524SDValue
2525AArch64TargetLowering::LowerConstantPool(SDValue Op,
2526                                         SelectionDAG &DAG) const {
2527  SDLoc DL(Op);
2528  EVT PtrVT = getPointerTy();
2529  ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Op);
2530  const Constant *C = CN->getConstVal();
2531
2532  switch(getTargetMachine().getCodeModel()) {
2533  case CodeModel::Small:
2534    // The most efficient code is PC-relative anyway for the small memory model,
2535    // so we don't need to worry about the relocation model.
2536    return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2537                       DAG.getTargetConstantPool(C, PtrVT, 0, 0,
2538                                                 AArch64II::MO_NO_FLAG),
2539                       DAG.getTargetConstantPool(C, PtrVT, 0, 0,
2540                                                 AArch64II::MO_LO12),
2541                       DAG.getConstant(CN->getAlignment(), MVT::i32));
2542  case CodeModel::Large:
2543    return DAG.getNode(
2544      AArch64ISD::WrapperLarge, DL, PtrVT,
2545      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
2546      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
2547      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
2548      DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC));
2549  default:
2550    llvm_unreachable("Only small and large code models supported now");
2551  }
2552}
2553
2554SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
2555                                                SDValue DescAddr,
2556                                                SDLoc DL,
2557                                                SelectionDAG &DAG) const {
2558  EVT PtrVT = getPointerTy();
2559
2560  // The function we need to call is simply the first entry in the GOT for this
2561  // descriptor; load it in preparation.
2562  SDValue Func, Chain;
2563  Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
2564                     DescAddr);
2565
2566  // The function takes only one argument: the address of the descriptor itself
2567  // in X0.
2568  SDValue Glue;
2569  Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
2570  Glue = Chain.getValue(1);
2571
2572  // Finally, there's a special calling-convention which means that the lookup
2573  // must preserve all registers (except X0, obviously).
2574  const TargetRegisterInfo *TRI  = getTargetMachine().getRegisterInfo();
2575  const AArch64RegisterInfo *A64RI
2576    = static_cast<const AArch64RegisterInfo *>(TRI);
2577  const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
2578
2579  // We're now ready to populate the argument list, as with a normal call:
2580  std::vector<SDValue> Ops;
2581  Ops.push_back(Chain);
2582  Ops.push_back(Func);
2583  Ops.push_back(SymAddr);
2584  Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
2585  Ops.push_back(DAG.getRegisterMask(Mask));
2586  Ops.push_back(Glue);
2587
2588  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2589  Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
2590                      Ops.size());
2591  Glue = Chain.getValue(1);
2592
2593  // After the call, the offset from TPIDR_EL0 is in X0; copy it out and pass it
2594  // back to the generic handling code.
2595  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
2596}
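// Rough sketch (illustrative) of the TLS-descriptor sequence modelled above,
// under the small code model:
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, #:tlsdesc_lo12:var]
//   add  x0, x0, #:tlsdesc_lo12:var
//   blr  x1
// after which x0 holds the offset of "var" from TPIDR_EL0; the descriptor
// helper preserves every register other than x0.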
2597
2598SDValue
2599AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
2600                                             SelectionDAG &DAG) const {
2601  assert(getSubtarget()->isTargetELF() &&
2602         "TLS not implemented for non-ELF targets");
2603  assert(getTargetMachine().getCodeModel() == CodeModel::Small
2604         && "TLS only supported in small memory model");
2605  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2606
2607  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
2608
2609  SDValue TPOff;
2610  EVT PtrVT = getPointerTy();
2611  SDLoc DL(Op);
2612  const GlobalValue *GV = GA->getGlobal();
2613
2614  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
2615
2616  if (Model == TLSModel::InitialExec) {
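    // Initial-exec: the thread-pointer offset lives in the GOT, so the access
    // is roughly (a sketch):
    //   adrp xN, :gottprel:var
    //   ldr  xN, [xN, #:gottprel_lo12:var]
    // and the result is added to TPIDR_EL0 below.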
2617    TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2618                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2619                                                   AArch64II::MO_GOTTPREL),
2620                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2621                                                   AArch64II::MO_GOTTPREL_LO12),
2622                        DAG.getConstant(8, MVT::i32));
2623    TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
2624                        TPOff);
2625  } else if (Model == TLSModel::LocalExec) {
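    // Local-exec: the offset from TPIDR_EL0 is a link-time constant, so it is
    // materialized directly, roughly:
    //   movz xN, #:tprel_g1:var
    //   movk xN, #:tprel_g0_nc:var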
2626    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2627                                               AArch64II::MO_TPREL_G1);
2628    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2629                                               AArch64II::MO_TPREL_G0_NC);
2630
2631    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2632                                       DAG.getTargetConstant(1, MVT::i32)), 0);
2633    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2634                                       TPOff, LoVar,
2635                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2636  } else if (Model == TLSModel::GeneralDynamic) {
2637    // Accesses used in this sequence go via the TLS descriptor which lives in
2638    // the GOT. Prepare an address we can use to handle this.
2639    SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2640                                                AArch64II::MO_TLSDESC);
2641    SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2642                                                AArch64II::MO_TLSDESC_LO12);
2643    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2644                                   HiDesc, LoDesc,
2645                                   DAG.getConstant(8, MVT::i32));
2646    SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
2647
2648    TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2649  } else if (Model == TLSModel::LocalDynamic) {
2650    // Local-dynamic accesses proceed in two phases. First, a general-dynamic
2651    // TLS descriptor call against the special symbol _TLS_MODULE_BASE_
2652    // calculates the beginning of the module's TLS region; this is followed by
2653    // a DTPREL offset calculation.
2654
2655    // These accesses will need deduplicating if there's more than one.
2656    AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
2657      .getInfo<AArch64MachineFunctionInfo>();
2658    MFI->incNumLocalDynamicTLSAccesses();
2659
2660
2661    // Get the location of _TLS_MODULE_BASE_:
2662    SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2663                                                AArch64II::MO_TLSDESC);
2664    SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2665                                                AArch64II::MO_TLSDESC_LO12);
2666    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2667                                   HiDesc, LoDesc,
2668                                   DAG.getConstant(8, MVT::i32));
2669    SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
2670
2671    ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2672
2673    // Get the variable's offset from _TLS_MODULE_BASE_
2674    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2675                                               AArch64II::MO_DTPREL_G1);
2676    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2677                                               AArch64II::MO_DTPREL_G0_NC);
2678
2679    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2680                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2681    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2682                                       TPOff, LoVar,
2683                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2684  } else
2685    llvm_unreachable("Unsupported TLS access model");
2686
2687
2688  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
2689}
2690
2691static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2692                                    bool IsSigned) {
2693  SDLoc dl(Op);
2694  EVT VT = Op.getValueType();
2695  SDValue Vec = Op.getOperand(0);
2696  unsigned Opc = IsSigned ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
2697
2698  if (VT.getVectorNumElements() == 1) {
2699    assert(VT == MVT::v1f64 && "Unexpected vector type!");
2700    if (VT.getSizeInBits() == Vec.getValueSizeInBits())
2701      return Op;
2702    return DAG.UnrollVectorOp(Op.getNode());
2703  }
2704
2705  if (VT.getSizeInBits() < Vec.getValueSizeInBits()) {
2706    assert(Vec.getValueType() == MVT::v2i64 && VT == MVT::v2f32 &&
2707           "Unexpected vector type!");
2708    Vec = DAG.getNode(Opc, dl, MVT::v2f64, Vec);
2709    return DAG.getNode(ISD::FP_ROUND, dl, VT, Vec, DAG.getIntPtrConstant(0));
2710  } else if (VT.getSizeInBits() > Vec.getValueSizeInBits()) {
2711    unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2712    EVT CastVT = EVT::getIntegerVT(*DAG.getContext(),
2713                                   VT.getVectorElementType().getSizeInBits());
2714    CastVT =
2715        EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements());
2716    Vec = DAG.getNode(CastOpc, dl, CastVT, Vec);
2717  }
2718
2719  return DAG.getNode(Opc, dl, VT, Vec);
2720}
2721
2722SDValue
2723AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2724                                      bool IsSigned) const {
2725  if (Op.getValueType().isVector())
2726    return LowerVectorINT_TO_FP(Op, DAG, IsSigned);
2727  if (Op.getValueType() != MVT::f128) {
2728    // Legal for everything except f128.
2729    return Op;
2730  }
2731
2732  RTLIB::Libcall LC;
2733  if (IsSigned)
2734    LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2735  else
2736    LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2737
2738  return LowerF128ToCall(Op, DAG, LC);
2739}
2740
2741
2742SDValue
2743AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2744  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2745  SDLoc dl(JT);
2746  EVT PtrVT = getPointerTy();
2747
2748  // When compiling PIC, jump tables get put in the code section, so a static
2749  // relocation style is acceptable for both cases.
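  // Small model sketch (assumed): the table address is formed PC-relatively,
  //   adrp xN, .LJTIn_m
  //   add  xN, xN, #:lo12:.LJTIn_m
  // while the large model uses the movz/movk chain built below.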
2750  switch (getTargetMachine().getCodeModel()) {
2751  case CodeModel::Small:
2752    return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2753                       DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
2754                       DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2755                                              AArch64II::MO_LO12),
2756                       DAG.getConstant(1, MVT::i32));
2757  case CodeModel::Large:
2758    return DAG.getNode(
2759      AArch64ISD::WrapperLarge, dl, PtrVT,
2760      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
2761      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
2762      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
2763      DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
2764  default:
2765    llvm_unreachable("Only small and large code models supported now");
2766  }
2767}
2768
2769// (SELECT testbit, iftrue, iffalse)
2770SDValue
2771AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2772  SDLoc dl(Op);
2773  SDValue TheBit = Op.getOperand(0);
2774  SDValue IfTrue = Op.getOperand(1);
2775  SDValue IfFalse = Op.getOperand(2);
2776
2777  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
2778  // that as the consumer we are responsible for ignoring rubbish in higher
2779  // bits.
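  // The nodes built here typically select to something along the lines of
  // (a sketch; the exact selection may differ):
  //   and  wTmp, wCond, #0x1
  //   cmp  wTmp, #0
  //   csel wDst, wTrue, wFalse, ne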
2780  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2781                       DAG.getConstant(1, MVT::i32));
2782  SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2783                               DAG.getConstant(0, TheBit.getValueType()),
2784                               DAG.getCondCode(ISD::SETNE));
2785
2786  return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2787                     A64CMP, IfTrue, IfFalse,
2788                     DAG.getConstant(A64CC::NE, MVT::i32));
2789}
2790
2791static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
2792  SDLoc DL(Op);
2793  SDValue LHS = Op.getOperand(0);
2794  SDValue RHS = Op.getOperand(1);
2795  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2796  EVT VT = Op.getValueType();
2797  bool Invert = false;
2798  SDValue Op0, Op1;
2799  unsigned Opcode;
2800
2801  if (LHS.getValueType().isInteger()) {
2802
2803    // Attempt to use Vector Integer Compare Mask Test instruction.
2804    // TST = icmp ne (and (op0, op1), zero).
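    // For example (a sketch), (setne (and a, b), 0) on <4 x i32> is expected
    // to become a single compare-mask-test:
    //   cmtst vD.4s, vA.4s, vB.4s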
2805    if (CC == ISD::SETNE) {
2806      if (((LHS.getOpcode() == ISD::AND) &&
2807           ISD::isBuildVectorAllZeros(RHS.getNode())) ||
2808          ((RHS.getOpcode() == ISD::AND) &&
2809           ISD::isBuildVectorAllZeros(LHS.getNode()))) {
2810
2811        SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
2812        SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
2813        SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
2814        return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
2815      }
2816    }
2817
2818    // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
2819    // Note: Compare against Zero does not support unsigned predicates.
2820    if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2821         ISD::isBuildVectorAllZeros(LHS.getNode())) &&
2822        !isUnsignedIntSetCC(CC)) {
2823
2824      // If LHS is the zero value, swap operands and CondCode.
2825      if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
2826        CC = getSetCCSwappedOperands(CC);
2827        Op0 = RHS;
2828      } else
2829        Op0 = LHS;
2830
2831      // Ensure valid CondCode for Compare Mask against Zero instruction:
2832      // EQ, GE, GT, LE, LT.
2833      if (ISD::SETNE == CC) {
2834        Invert = true;
2835        CC = ISD::SETEQ;
2836      }
2837
2838      // The constant's type distinguishes integer and FP compares with zero.
2839      Op1 = DAG.getConstant(0, MVT::i32);
2840      Opcode = AArch64ISD::NEON_CMPZ;
2841
2842    } else {
2843      // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
2844      // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
2845      bool Swap = false;
2846      switch (CC) {
2847      default:
2848        llvm_unreachable("Illegal integer comparison.");
2849      case ISD::SETEQ:
2850      case ISD::SETGT:
2851      case ISD::SETGE:
2852      case ISD::SETUGT:
2853      case ISD::SETUGE:
2854        break;
2855      case ISD::SETNE:
2856        Invert = true;
2857        CC = ISD::SETEQ;
2858        break;
2859      case ISD::SETULT:
2860      case ISD::SETULE:
2861      case ISD::SETLT:
2862      case ISD::SETLE:
2863        Swap = true;
2864        CC = getSetCCSwappedOperands(CC);
2865      }
2866
2867      if (Swap)
2868        std::swap(LHS, RHS);
2869
2870      Opcode = AArch64ISD::NEON_CMP;
2871      Op0 = LHS;
2872      Op1 = RHS;
2873    }
2874
2875    // Generate Compare Mask instr or Compare Mask against Zero instr.
2876    SDValue NeonCmp =
2877        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2878
2879    if (Invert)
2880      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
2881
2882    return NeonCmp;
2883  }
2884
2885  // Now handle Floating Point cases.
2886  // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
2887  if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2888      ISD::isBuildVectorAllZeros(LHS.getNode())) {
2889
2890    // If LHS is the zero value, swap operands and CondCode.
2891    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
2892      CC = getSetCCSwappedOperands(CC);
2893      Op0 = RHS;
2894    } else
2895      Op0 = LHS;
2896
2897    // The constant's type distinguishes integer and FP compares with zero.
2898    Op1 = DAG.getConstantFP(0, MVT::f32);
2899    Opcode = AArch64ISD::NEON_CMPZ;
2900  } else {
2901    // Attempt to use Vector Floating Point Compare Mask instruction.
2902    Op0 = LHS;
2903    Op1 = RHS;
2904    Opcode = AArch64ISD::NEON_CMP;
2905  }
2906
2907  SDValue NeonCmpAlt;
2908  // Some register compares have to be implemented with swapped CC and operands,
2909  // e.g.: OLT implemented as OGT with swapped operands.
2910  bool SwapIfRegArgs = false;
2911
2912  // Ensure valid CondCode for FP Compare Mask against Zero instruction:
2913  // EQ, GE, GT, LE, LT.
2914  // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
2915  switch (CC) {
2916  default:
2917    llvm_unreachable("Illegal FP comparison");
2918  case ISD::SETUNE:
2919  case ISD::SETNE:
2920    Invert = true; // Fallthrough
2921  case ISD::SETOEQ:
2922  case ISD::SETEQ:
2923    CC = ISD::SETEQ;
2924    break;
2925  case ISD::SETOLT:
2926  case ISD::SETLT:
2927    CC = ISD::SETLT;
2928    SwapIfRegArgs = true;
2929    break;
2930  case ISD::SETOGT:
2931  case ISD::SETGT:
2932    CC = ISD::SETGT;
2933    break;
2934  case ISD::SETOLE:
2935  case ISD::SETLE:
2936    CC = ISD::SETLE;
2937    SwapIfRegArgs = true;
2938    break;
2939  case ISD::SETOGE:
2940  case ISD::SETGE:
2941    CC = ISD::SETGE;
2942    break;
2943  case ISD::SETUGE:
2944    Invert = true;
2945    CC = ISD::SETLT;
2946    SwapIfRegArgs = true;
2947    break;
2948  case ISD::SETULE:
2949    Invert = true;
2950    CC = ISD::SETGT;
2951    break;
2952  case ISD::SETUGT:
2953    Invert = true;
2954    CC = ISD::SETLE;
2955    SwapIfRegArgs = true;
2956    break;
2957  case ISD::SETULT:
2958    Invert = true;
2959    CC = ISD::SETGE;
2960    break;
2961  case ISD::SETUEQ:
2962    Invert = true; // Fallthrough
2963  case ISD::SETONE:
2964    // Expand this to (OGT | OLT).
2965    NeonCmpAlt =
2966        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
2967    CC = ISD::SETLT;
2968    SwapIfRegArgs = true;
2969    break;
2970  case ISD::SETUO:
2971    Invert = true; // Fallthrough
2972  case ISD::SETO:
2973    // Expand this to (OGE | OLT).
2974    NeonCmpAlt =
2975        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
2976    CC = ISD::SETLT;
2977    SwapIfRegArgs = true;
2978    break;
2979  }
2980
2981  if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
2982    CC = getSetCCSwappedOperands(CC);
2983    std::swap(Op0, Op1);
2984  }
2985
2986  // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
2987  SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2988
2989  if (NeonCmpAlt.getNode())
2990    NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
2991
2992  if (Invert)
2993    NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
2994
2995  return NeonCmp;
2996}
2997
2998// (SETCC lhs, rhs, condcode)
2999SDValue
3000AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3001  SDLoc dl(Op);
3002  SDValue LHS = Op.getOperand(0);
3003  SDValue RHS = Op.getOperand(1);
3004  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3005  EVT VT = Op.getValueType();
3006
3007  if (VT.isVector())
3008    return LowerVectorSETCC(Op, DAG);
3009
3010  if (LHS.getValueType() == MVT::f128) {
3011    // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
3012    // for the rest of the function (some i32 or i64 values).
3013    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
3014
3015    // If softenSetCCOperands returned a scalar, use it.
3016    if (RHS.getNode() == 0) {
3017      assert(LHS.getValueType() == Op.getValueType() &&
3018             "Unexpected setcc expansion!");
3019      return LHS;
3020    }
3021  }
3022
3023  if (LHS.getValueType().isInteger()) {
3024    SDValue A64cc;
3025
3026    // Integers are handled in a separate function because the combinations of
3027    // immediates and tests can get hairy and we may want to fiddle things.
3028    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
3029
3030    return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
3031                       CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
3032                       A64cc);
3033  }
3034
3035  // Note that some LLVM floating-point CondCodes can't be lowered to a single
3036  // conditional branch, hence FPCCToA64CC can set a second test, where passing
3037  // either one is sufficient.
3038  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
3039  CondCode = FPCCToA64CC(CC, Alternative);
3040  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
3041  SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
3042                              DAG.getCondCode(CC));
3043  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
3044                                     CmpOp, DAG.getConstant(1, VT),
3045                                     DAG.getConstant(0, VT), A64cc);
3046
3047  if (Alternative != A64CC::Invalid) {
3048    A64cc = DAG.getConstant(Alternative, MVT::i32);
3049    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
3050                               DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
3051  }
3052
3053  return A64SELECT_CC;
3054}
3055
3056static SDValue LowerVectorSELECT_CC(SDValue Op, SelectionDAG &DAG) {
3057  SDLoc dl(Op);
3058  SDValue LHS = Op.getOperand(0);
3059  SDValue RHS = Op.getOperand(1);
3060  SDValue IfTrue = Op.getOperand(2);
3061  SDValue IfFalse = Op.getOperand(3);
3062  EVT IfTrueVT = IfTrue.getValueType();
3063  EVT CondVT = IfTrueVT.changeVectorElementTypeToInteger();
3064  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
3065
3066  // If LHS & RHS are floating point and IfTrue & IfFalse are vectors, we will
3067  // use a NEON compare.
3068  if ((LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64)) {
3069    EVT EltVT = LHS.getValueType();
3070    unsigned EltNum = 128 / EltVT.getSizeInBits();
3071    EVT VT = EVT::getVectorVT(*DAG.getContext(), EltVT, EltNum);
3072    unsigned SubConstant =
3073        (LHS.getValueType() == MVT::f32) ? AArch64::sub_32 : AArch64::sub_64;
3074    EVT CEltT = (LHS.getValueType() == MVT::f32) ? MVT::i32 : MVT::i64;
3075    EVT CVT = EVT::getVectorVT(*DAG.getContext(), CEltT, EltNum);
3076
3077    LHS
3078      = SDValue(DAG.getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
3079                  VT, DAG.getTargetConstant(0, MVT::i32), LHS,
3080                  DAG.getTargetConstant(SubConstant, MVT::i32)), 0);
3081    RHS
3082      = SDValue(DAG.getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
3083                  VT, DAG.getTargetConstant(0, MVT::i32), RHS,
3084                  DAG.getTargetConstant(SubConstant, MVT::i32)), 0);
3085
3086    SDValue VSetCC = DAG.getSetCC(dl, CVT, LHS, RHS, CC);
3087    SDValue ResCC = LowerVectorSETCC(VSetCC, DAG);
3088    if (CEltT.getSizeInBits() < IfTrueVT.getSizeInBits()) {
3089      EVT DUPVT =
3090          EVT::getVectorVT(*DAG.getContext(), CEltT,
3091                           IfTrueVT.getSizeInBits() / CEltT.getSizeInBits());
3092      ResCC = DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, DUPVT, ResCC,
3093                          DAG.getConstant(0, MVT::i64, false));
3094
3095      ResCC = DAG.getNode(ISD::BITCAST, dl, CondVT, ResCC);
3096    } else {
3097      // FIXME: If IfTrue & IfFalse hold v1i8, v1i16 or v1i32, this function
3098      // can't handle them and will hit this assert.
3099      assert(CEltT.getSizeInBits() == IfTrueVT.getSizeInBits() &&
3100             "Vector of IfTrue & IfFalse is too small.");
3101
3102      unsigned ExEltNum =
3103          EltNum * IfTrueVT.getSizeInBits() / ResCC.getValueSizeInBits();
3104      EVT ExVT = EVT::getVectorVT(*DAG.getContext(), CEltT, ExEltNum);
3105      ResCC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExVT, ResCC,
3106                          DAG.getConstant(0, MVT::i64, false));
3107      ResCC = DAG.getNode(ISD::BITCAST, dl, CondVT, ResCC);
3108    }
3109    SDValue VSelect = DAG.getNode(ISD::VSELECT, dl, IfTrue.getValueType(),
3110                                  ResCC, IfTrue, IfFalse);
3111    return VSelect;
3112  }
3113
3114  // Here we handle the case where LHS & RHS are integers and IfTrue & IfFalse
3115  // are vectors.
3116  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
3117  CondCode = FPCCToA64CC(CC, Alternative);
3118  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
3119  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
3120                              DAG.getCondCode(CC));
3121  EVT SEVT = MVT::i32;
3122  if (IfTrue.getValueType().getVectorElementType().getSizeInBits() > 32)
3123    SEVT = MVT::i64;
3124  SDValue AllOne = DAG.getConstant(-1, SEVT);
3125  SDValue AllZero = DAG.getConstant(0, SEVT);
3126  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, SEVT, SetCC,
3127                                     AllOne, AllZero, A64cc);
3128
3129  if (Alternative != A64CC::Invalid) {
3130    A64cc = DAG.getConstant(Alternative, MVT::i32);
3131    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
3132                               SetCC, AllOne, A64SELECT_CC, A64cc);
3133  }
3134  SDValue VDup;
3135  if (IfTrue.getValueType().getVectorNumElements() == 1)
3136    VDup = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, CondVT, A64SELECT_CC);
3137  else
3138    VDup = DAG.getNode(AArch64ISD::NEON_VDUP, dl, CondVT, A64SELECT_CC);
3139  SDValue VSelect = DAG.getNode(ISD::VSELECT, dl, IfTrue.getValueType(),
3140                                VDup, IfTrue, IfFalse);
3141  return VSelect;
3142}
3143
3144// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
3145SDValue
3146AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
3147  SDLoc dl(Op);
3148  SDValue LHS = Op.getOperand(0);
3149  SDValue RHS = Op.getOperand(1);
3150  SDValue IfTrue = Op.getOperand(2);
3151  SDValue IfFalse = Op.getOperand(3);
3152  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
3153
3154  if (IfTrue.getValueType().isVector())
3155    return LowerVectorSELECT_CC(Op, DAG);
3156
3157  if (LHS.getValueType() == MVT::f128) {
3158    // f128 comparisons are lowered to libcalls, but slot in nicely here
3159    // afterwards.
3160    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
3161
3162    // If softenSetCCOperands returned a scalar, we need to compare the result
3163    // against zero to select between true and false values.
3164    if (RHS.getNode() == 0) {
3165      RHS = DAG.getConstant(0, LHS.getValueType());
3166      CC = ISD::SETNE;
3167    }
3168  }
3169
3170  if (LHS.getValueType().isInteger()) {
3171    SDValue A64cc;
3172
3173    // Integers are handled in a separate function because the combinations of
3174    // immediates and tests can get hairy and we may want to fiddle things.
3175    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
3176
3177    return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), CmpOp,
3178                       IfTrue, IfFalse, A64cc);
3179  }
3180
3181  // Note that some LLVM floating-point CondCodes can't be lowered to a single
3182  // conditional branch, hence FPCCToA64CC can set a second test, where passing
3183  // either one is sufficient.
3184  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
3185  CondCode = FPCCToA64CC(CC, Alternative);
3186  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
3187  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
3188                              DAG.getCondCode(CC));
3189  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
3190                                     Op.getValueType(),
3191                                     SetCC, IfTrue, IfFalse, A64cc);
3192
3193  if (Alternative != A64CC::Invalid) {
3194    A64cc = DAG.getConstant(Alternative, MVT::i32);
3195    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
3196                               SetCC, IfTrue, A64SELECT_CC, A64cc);
3197
3198  }
3199
3200  return A64SELECT_CC;
3201}
3202
3203SDValue
3204AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3205  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3206  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3207
3208  // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
3209  // rather than just 8.
3210  return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
3211                       Op.getOperand(1), Op.getOperand(2),
3212                       DAG.getConstant(32, MVT::i32), 8, false, false,
3213                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
3214}
3215
3216SDValue
3217AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3218  // The layout of the va_list struct is specified in the AArch64 Procedure Call
3219  // Standard, section B.3.
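  // For reference, the layout being filled in (offsets match the stores below):
  //   struct va_list {
  //     void *__stack;   // offset 0
  //     void *__gr_top;  // offset 8
  //     void *__vr_top;  // offset 16
  //     int   __gr_offs; // offset 24
  //     int   __vr_offs; // offset 28
  //   };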
3220  MachineFunction &MF = DAG.getMachineFunction();
3221  AArch64MachineFunctionInfo *FuncInfo
3222    = MF.getInfo<AArch64MachineFunctionInfo>();
3223  SDLoc DL(Op);
3224
3225  SDValue Chain = Op.getOperand(0);
3226  SDValue VAList = Op.getOperand(1);
3227  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3228  SmallVector<SDValue, 4> MemOps;
3229
3230  // void *__stack at offset 0
3231  SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
3232                                    getPointerTy());
3233  MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
3234                                MachinePointerInfo(SV), false, false, 0));
3235
3236  // void *__gr_top at offset 8
3237  int GPRSize = FuncInfo->getVariadicGPRSize();
3238  if (GPRSize > 0) {
3239    SDValue GRTop, GRTopAddr;
3240
3241    GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3242                            DAG.getConstant(8, getPointerTy()));
3243
3244    GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
3245    GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
3246                        DAG.getConstant(GPRSize, getPointerTy()));
3247
3248    MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
3249                                  MachinePointerInfo(SV, 8),
3250                                  false, false, 0));
3251  }
3252
3253  // void *__vr_top at offset 16
3254  int FPRSize = FuncInfo->getVariadicFPRSize();
3255  if (FPRSize > 0) {
3256    SDValue VRTop, VRTopAddr;
3257    VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3258                            DAG.getConstant(16, getPointerTy()));
3259
3260    VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
3261    VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
3262                        DAG.getConstant(FPRSize, getPointerTy()));
3263
3264    MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
3265                                  MachinePointerInfo(SV, 16),
3266                                  false, false, 0));
3267  }
3268
3269  // int __gr_offs at offset 24
3270  SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3271                                   DAG.getConstant(24, getPointerTy()));
3272  MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
3273                                GROffsAddr, MachinePointerInfo(SV, 24),
3274                                false, false, 0));
3275
3276  // int __vr_offs at offset 28
3277  SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
3278                                   DAG.getConstant(28, getPointerTy()));
3279  MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
3280                                VROffsAddr, MachinePointerInfo(SV, 28),
3281                                false, false, 0));
3282
3283  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
3284                     MemOps.size());
3285}
3286
3287SDValue
3288AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3289  switch (Op.getOpcode()) {
3290  default: llvm_unreachable("Don't know how to custom lower this!");
3291  case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
3292  case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
3293  case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
3294  case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
3295  case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
3296  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
3297  case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
3298  case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
3299  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
3300  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
3301  case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
3302  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
3303
3304  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
3305  case ISD::SRL_PARTS:
3306  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
3307
3308  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3309  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3310  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
3311  case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
3312  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3313  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3314  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3315  case ISD::SELECT: return LowerSELECT(Op, DAG);
3316  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
3317  case ISD::SETCC: return LowerSETCC(Op, DAG);
3318  case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3319  case ISD::VASTART: return LowerVASTART(Op, DAG);
3320  case ISD::BUILD_VECTOR:
3321    return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
3322  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3323  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3324  }
3325
3326  return SDValue();
3327}
3328
3329/// Check if the specified splat value corresponds to a valid vector constant
3330/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI).  If
3331/// so, return the encoded 8-bit immediate and the OpCmode instruction field
3332/// values.
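/// For example, a 32-bit splat of 0x0000ff00 is encodable as Imm = 0xff with
/// OpCmode = 0x2 ("LSL 8"), which corresponds roughly to
///   movi vD.4s, #0xff, lsl #8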
3333static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
3334                              unsigned SplatBitSize, SelectionDAG &DAG,
3335                              bool is128Bits, NeonModImmType type, EVT &VT,
3336                              unsigned &Imm, unsigned &OpCmode) {
3337  switch (SplatBitSize) {
3338  default:
3339    llvm_unreachable("unexpected size for isNeonModifiedImm");
3340  case 8: {
3341    if (type != Neon_Mov_Imm)
3342      return false;
3343    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
3344    // Neon movi per byte: Op=0, Cmode=1110.
3345    OpCmode = 0xe;
3346    Imm = SplatBits;
3347    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3348    break;
3349  }
3350  case 16: {
3351    // Neon move inst per halfword
3352    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3353    if ((SplatBits & ~0xff) == 0) {
3354      // Value = 0x00nn is 0x00nn LSL 0
3355      // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
3356      // bic:  Op=1, Cmode=1001;  orr:  Op=0, Cmode=1001
3357      // Op=x, Cmode=100y
3358      Imm = SplatBits;
3359      OpCmode = 0x8;
3360      break;
3361    }
3362    if ((SplatBits & ~0xff00) == 0) {
3363      // Value = 0xnn00 is 0x00nn LSL 8
3364      // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
3365      // bic:  Op=1, Cmode=1011;  orr:  Op=0, Cmode=1011
3366      // Op=x, Cmode=101x
3367      Imm = SplatBits >> 8;
3368      OpCmode = 0xa;
3369      break;
3370    }
3371    // can't handle any other
3372    return false;
3373  }
3374
3375  case 32: {
3376    // First the LSL variants (MSL is unusable by some interested instructions).
3377
3378    // Neon move instr per word, shift zeros
3379    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3380    if ((SplatBits & ~0xff) == 0) {
3381      // Value = 0x000000nn is 0x000000nn LSL 0
3382      // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
3383      // bic:  Op=1, Cmode= 0001; orr:  Op=0, Cmode= 0001
3384      // Op=x, Cmode=000x
3385      Imm = SplatBits;
3386      OpCmode = 0;
3387      break;
3388    }
3389    if ((SplatBits & ~0xff00) == 0) {
3390      // Value = 0x0000nn00 is 0x000000nn LSL 8
3391      // movi: Op=0, Cmode= 0010;  mvni: Op=1, Cmode= 0010
3392      // bic:  Op=1, Cmode= 0011;  orr : Op=0, Cmode= 0011
3393      // Op=x, Cmode=001x
3394      Imm = SplatBits >> 8;
3395      OpCmode = 0x2;
3396      break;
3397    }
3398    if ((SplatBits & ~0xff0000) == 0) {
3399      // Value = 0x00nn0000 is 0x000000nn LSL 16
3400      // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
3401      // bic:  Op=1, Cmode= 0101; orr:  Op=0, Cmode= 0101
3402      // Op=x, Cmode=010x
3403      Imm = SplatBits >> 16;
3404      OpCmode = 0x4;
3405      break;
3406    }
3407    if ((SplatBits & ~0xff000000) == 0) {
3408      // Value = 0xnn000000 is 0x000000nn LSL 24
3409      // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
3410      // bic:  Op=1, Cmode= 0111; orr:  Op=0, Cmode= 0111
3411      // Op=x, Cmode=011x
3412      Imm = SplatBits >> 24;
3413      OpCmode = 0x6;
3414      break;
3415    }
3416
3417    // Now the MSL immediates.
3418
3419    // Neon move instr per word, shift ones
3420    if ((SplatBits & ~0xffff) == 0 &&
3421        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
3422      // Value = 0x0000nnff is 0x000000nn MSL 8
3423      // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
3424      // Op=x, Cmode=1100
3425      Imm = SplatBits >> 8;
3426      OpCmode = 0xc;
3427      break;
3428    }
3429    if ((SplatBits & ~0xffffff) == 0 &&
3430        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3431      // Value = 0x00nnffff is 0x000000nn MSL 16
3432    // movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
3433      // Op=x, Cmode=1101
3434      Imm = SplatBits >> 16;
3435      OpCmode = 0xd;
3436      break;
3437    }
3438    // can't handle any other
3439    return false;
3440  }
3441
3442  case 64: {
3443    if (type != Neon_Mov_Imm)
3444      return false;
3445    // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
3446    // movi Op=1, Cmode=1110.
3447    OpCmode = 0x1e;
3448    uint64_t BitMask = 0xff;
3449    uint64_t Val = 0;
3450    unsigned ImmMask = 1;
3451    Imm = 0;
3452    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3453      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3454        Val |= BitMask;
3455        Imm |= ImmMask;
3456      } else if ((SplatBits & BitMask) != 0) {
3457        return false;
3458      }
3459      BitMask <<= 8;
3460      ImmMask <<= 1;
3461    }
3462    SplatBits = Val;
3463    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3464    break;
3465  }
3466  }
3467
3468  return true;
3469}
3470
3471static SDValue PerformANDCombine(SDNode *N,
3472                                 TargetLowering::DAGCombinerInfo &DCI) {
3473
3474  SelectionDAG &DAG = DCI.DAG;
3475  SDLoc DL(N);
3476  EVT VT = N->getValueType(0);
3477
3478  // We're looking for an AND/SRL pair which together form a UBFX.
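  // For example (a sketch): (and (srl x, 4), 0xff) selects bits [11:4] of x,
  // which is the UBFX node built below with LSB = 4 and MSB = 11, i.e. roughly
  //   ubfx wD, wN, #4, #8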
3479
3480  if (VT != MVT::i32 && VT != MVT::i64)
3481    return SDValue();
3482
3483  if (!isa<ConstantSDNode>(N->getOperand(1)))
3484    return SDValue();
3485
3486  uint64_t TruncMask = N->getConstantOperandVal(1);
3487  if (!isMask_64(TruncMask))
3488    return SDValue();
3489
3490  uint64_t Width = CountPopulation_64(TruncMask);
3491  SDValue Shift = N->getOperand(0);
3492
3493  if (Shift.getOpcode() != ISD::SRL)
3494    return SDValue();
3495
3496  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3497    return SDValue();
3498  uint64_t LSB = Shift->getConstantOperandVal(1);
3499
3500  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3501    return SDValue();
3502
3503  return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
3504                     DAG.getConstant(LSB, MVT::i64),
3505                     DAG.getConstant(LSB + Width - 1, MVT::i64));
3506}
3507
3508/// For a true bitfield insert, the bits getting into that contiguous mask
3509/// should come from the low part of an existing value: they must be formed from
3510/// a compatible SHL operation (unless they're already low). This function
3511/// checks that condition and returns the least-significant bit that's
3512/// intended. If the operation is not a field preparation, -1 is returned.
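/// For example, with Mask == 0xff0 and MaskedVal == (shl x, 4) this returns
/// LSB == 4 and rewrites MaskedVal to plain x, since the BFI's own left-shift
/// by 4 will put x's low byte back into bits [11:4].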
3513static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
3514                            SDValue &MaskedVal, uint64_t Mask) {
3515  if (!isShiftedMask_64(Mask))
3516    return -1;
3517
3518  // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
3519  // instruction. BFI will do a left-shift by LSB before applying the mask we've
3520  // spotted, so in general we should pre-emptively "undo" that by making sure
3521  // the incoming bits have had a right-shift applied to them.
3522  //
3523  // This right shift, however, will combine with existing left/right shifts. In
3524  // the simplest case of a completely straight bitfield operation, it will be
3525  // expected to completely cancel out with an existing SHL. More complicated
3526  // cases (e.g. bitfield to bitfield copy) may still need a real shift before
3527  // the BFI.
3528
3529  uint64_t LSB = countTrailingZeros(Mask);
3530  int64_t ShiftRightRequired = LSB;
3531  if (MaskedVal.getOpcode() == ISD::SHL &&
3532      isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3533    ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
3534    MaskedVal = MaskedVal.getOperand(0);
3535  } else if (MaskedVal.getOpcode() == ISD::SRL &&
3536             isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3537    ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
3538    MaskedVal = MaskedVal.getOperand(0);
3539  }
3540
3541  if (ShiftRightRequired > 0)
3542    MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
3543                            DAG.getConstant(ShiftRightRequired, MVT::i64));
3544  else if (ShiftRightRequired < 0) {
3545    // We could actually end up with a residual left shift, for example with
3546    // "struc.bitfield = val << 1".
3547    MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
3548                            DAG.getConstant(-ShiftRightRequired, MVT::i64));
3549  }
3550
3551  return LSB;
3552}
3553
3554/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
3555/// a mask and an extension. Returns true if a BFI was found and provides
3556/// information on its surroundings.
3557static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
3558                          bool &Extended) {
3559  Extended = false;
3560  if (N.getOpcode() == ISD::ZERO_EXTEND) {
3561    Extended = true;
3562    N = N.getOperand(0);
3563  }
3564
3565  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
3566    Mask = N->getConstantOperandVal(1);
3567    N = N.getOperand(0);
3568  } else {
3569    // Mask is the whole width.
3570    Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
3571  }
3572
3573  if (N.getOpcode() == AArch64ISD::BFI) {
3574    BFI = N;
3575    return true;
3576  }
3577
3578  return false;
3579}
3580
3581/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
3582/// is roughly equivalent to (and (BFI ...), mask). This form is used because it
3583/// can often be further combined with a larger mask. Ultimately, we want mask
3584/// to be 2^32-1 or 2^64-1 so the AND can be skipped.
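/// For example (an i32 sketch):
///   (or (and x, 0xffff000f), (and (shl y, #4), 0x0000fff0))
/// has compatible masks covering the whole register, so it can become a plain
/// (BFI x, y, #4, #12); with a smaller combined mask the BFI would instead be
/// wrapped in an AND of that mask.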
3585static SDValue tryCombineToBFI(SDNode *N,
3586                               TargetLowering::DAGCombinerInfo &DCI,
3587                               const AArch64Subtarget *Subtarget) {
3588  SelectionDAG &DAG = DCI.DAG;
3589  SDLoc DL(N);
3590  EVT VT = N->getValueType(0);
3591
3592  assert(N->getOpcode() == ISD::OR && "Unexpected root");
3593
3594  // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
3595  // abandon the effort.
3596  SDValue LHS = N->getOperand(0);
3597  if (LHS.getOpcode() != ISD::AND)
3598    return SDValue();
3599
3600  uint64_t LHSMask;
3601  if (isa<ConstantSDNode>(LHS.getOperand(1)))
3602    LHSMask = LHS->getConstantOperandVal(1);
3603  else
3604    return SDValue();
3605
3606  // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
3607  // is or abandon the effort.
3608  SDValue RHS = N->getOperand(1);
3609  if (RHS.getOpcode() != ISD::AND)
3610    return SDValue();
3611
3612  uint64_t RHSMask;
3613  if (isa<ConstantSDNode>(RHS.getOperand(1)))
3614    RHSMask = RHS->getConstantOperandVal(1);
3615  else
3616    return SDValue();
3617
3618  // Can't do anything if the masks are incompatible.
3619  if (LHSMask & RHSMask)
3620    return SDValue();
3621
3622  // Now we need one of the masks to be a contiguous field. Without loss of
3623  // generality that should be the RHS one.
3624  SDValue Bitfield = LHS.getOperand(0);
3625  if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
3626    // We know that LHS is a candidate new value, and RHS isn't already a better
3627    // one.
3628    std::swap(LHS, RHS);
3629    std::swap(LHSMask, RHSMask);
3630  }
3631
3632  // We've done our best to put the right operands in the right places, all we
3633  // can do now is check whether a BFI exists.
3634  Bitfield = RHS.getOperand(0);
3635  int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
3636  if (LSB == -1)
3637    return SDValue();
3638
3639  uint32_t Width = CountPopulation_64(RHSMask);
3640  assert(Width && "Expected non-zero bitfield width");
3641
3642  SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3643                            LHS.getOperand(0), Bitfield,
3644                            DAG.getConstant(LSB, MVT::i64),
3645                            DAG.getConstant(Width, MVT::i64));
3646
3647  // Mask is trivial
3648  if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3649    return BFI;
3650
3651  return DAG.getNode(ISD::AND, DL, VT, BFI,
3652                     DAG.getConstant(LHSMask | RHSMask, VT));
3653}
3654
3655/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
3656/// original input. This is surprisingly common because SROA splits things up
3657/// into i8 chunks, so the originally detected MaskedBFI may actually only act
3658/// on the low (say) byte of a word. This is then ORed into the rest of the
3659/// word afterwards.
3660///
3661/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3662///
3663/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
3664/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
3665/// involved.
3666static SDValue tryCombineToLargerBFI(SDNode *N,
3667                                     TargetLowering::DAGCombinerInfo &DCI,
3668                                     const AArch64Subtarget *Subtarget) {
3669  SelectionDAG &DAG = DCI.DAG;
3670  SDLoc DL(N);
3671  EVT VT = N->getValueType(0);
3672
3673  // First job is to hunt for a MaskedBFI on either the left or right. Swap
3674  // operands if it's actually on the right.
3675  SDValue BFI;
3676  SDValue PossExtraMask;
3677  uint64_t ExistingMask = 0;
3678  bool Extended = false;
3679  if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3680    PossExtraMask = N->getOperand(1);
3681  else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
3682    PossExtraMask = N->getOperand(0);
3683  else
3684    return SDValue();
3685
3686  // We can only combine a BFI with another compatible mask.
3687  if (PossExtraMask.getOpcode() != ISD::AND ||
3688      !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
3689    return SDValue();
3690
3691  uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3692
3693  // Masks must be compatible.
3694  if (ExtraMask & ExistingMask)
3695    return SDValue();
3696
3697  SDValue OldBFIVal = BFI.getOperand(0);
3698  SDValue NewBFIVal = BFI.getOperand(1);
3699  if (Extended) {
3700    // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3701    // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3702    // need to be made compatible.
3703    assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3704           && "Invalid types for BFI");
3705    OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
3706    NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
3707  }
3708
3709  // We need the MaskedBFI to be combined with a mask of the *same* value.
3710  if (PossExtraMask.getOperand(0) != OldBFIVal)
3711    return SDValue();
3712
3713  BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3714                    OldBFIVal, NewBFIVal,
3715                    BFI.getOperand(2), BFI.getOperand(3));
3716
3717  // If the masking is trivial, we don't need to create it.
3718  if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3719    return BFI;
3720
3721  return DAG.getNode(ISD::AND, DL, VT, BFI,
3722                     DAG.getConstant(ExtraMask | ExistingMask, VT));
3723}
3724
3725/// An EXTR instruction is made up of two shifts, ORed together. This helper
3726/// searches for and classifies those shifts.
3727static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
3728                         bool &FromHi) {
3729  if (N.getOpcode() == ISD::SHL)
3730    FromHi = false;
3731  else if (N.getOpcode() == ISD::SRL)
3732    FromHi = true;
3733  else
3734    return false;
3735
3736  if (!isa<ConstantSDNode>(N.getOperand(1)))
3737    return false;
3738
3739  ShiftAmount = N->getConstantOperandVal(1);
3740  Src = N->getOperand(0);
3741  return true;
3742}
3743
3744/// An EXTR instruction extracts a contiguous chunk of bits from two existing
3745/// registers viewed as a high/low pair. This function looks for the pattern:
3746/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
3747/// EXTR. Can't quite be done in TableGen because the two immediates aren't
3748/// independent.
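/// For example, on i64 (a sketch): (or (shl hi, #48), (srl lo, #16)) becomes
/// (EXTR hi, lo, #16), i.e. roughly "extr xD, xHi, xLo, #16"; a rotate is the
/// special case where both inputs are the same register.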
3749static SDValue tryCombineToEXTR(SDNode *N,
3750                                TargetLowering::DAGCombinerInfo &DCI) {
3751  SelectionDAG &DAG = DCI.DAG;
3752  SDLoc DL(N);
3753  EVT VT = N->getValueType(0);
3754
3755  assert(N->getOpcode() == ISD::OR && "Unexpected root");
3756
3757  if (VT != MVT::i32 && VT != MVT::i64)
3758    return SDValue();
3759
3760  SDValue LHS;
3761  uint32_t ShiftLHS = 0;
3762  bool LHSFromHi = false;
3763  if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
3764    return SDValue();
3765
3766  SDValue RHS;
3767  uint32_t ShiftRHS = 0;
3768  bool RHSFromHi = false;
3769  if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
3770    return SDValue();
3771
3772  // If they're both trying to come from the high part of the register, they're
3773  // not really an EXTR.
3774  if (LHSFromHi == RHSFromHi)
3775    return SDValue();
3776
3777  if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
3778    return SDValue();
3779
3780  if (LHSFromHi) {
3781    std::swap(LHS, RHS);
3782    std::swap(ShiftLHS, ShiftRHS);
3783  }
3784
3785  return DAG.getNode(AArch64ISD::EXTR, DL, VT,
3786                     LHS, RHS,
3787                     DAG.getConstant(ShiftRHS, MVT::i64));
3788}
3789
3790/// Target-specific dag combine xforms for ISD::OR
3791static SDValue PerformORCombine(SDNode *N,
3792                                TargetLowering::DAGCombinerInfo &DCI,
3793                                const AArch64Subtarget *Subtarget) {
3794
3795  SelectionDAG &DAG = DCI.DAG;
3796  SDLoc DL(N);
3797  EVT VT = N->getValueType(0);
3798
3799  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3800    return SDValue();
3801
3802  // Attempt to recognise bitfield-insert operations.
3803  SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
3804  if (Res.getNode())
3805    return Res;
3806
3807  // Attempt to combine an existing MaskedBFI operation into one with a larger
3808  // mask.
3809  Res = tryCombineToLargerBFI(N, DCI, Subtarget);
3810  if (Res.getNode())
3811    return Res;
3812
3813  Res = tryCombineToEXTR(N, DCI);
3814  if (Res.getNode())
3815    return Res;
3816
3817  if (!Subtarget->hasNEON())
3818    return SDValue();
3819
3820  // Attempt to use vector immediate-form BSL
3821  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
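  // For example, with a constant mask A on <16 x i8> the VSELECT built below
  // is expected to select to a single "bsl vA.16b, vB.16b, vC.16b" (a sketch;
  // the exact register assignment depends on selection).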
3822
3823  SDValue N0 = N->getOperand(0);
3824  if (N0.getOpcode() != ISD::AND)
3825    return SDValue();
3826
3827  SDValue N1 = N->getOperand(1);
3828  if (N1.getOpcode() != ISD::AND)
3829    return SDValue();
3830
3831  if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
3832    APInt SplatUndef;
3833    unsigned SplatBitSize;
3834    bool HasAnyUndefs;
3835    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
3836    APInt SplatBits0;
3837    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
3838                                      HasAnyUndefs) &&
3839        !HasAnyUndefs) {
3840      BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
3841      APInt SplatBits1;
3842      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
3843                                        HasAnyUndefs) && !HasAnyUndefs &&
3844          SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
3845          SplatBits0 == ~SplatBits1) {
3846
3847        return DAG.getNode(ISD::VSELECT, DL, VT, N0->getOperand(1),
3848                           N0->getOperand(0), N1->getOperand(0));
3849      }
3850    }
3851  }
3852
3853  return SDValue();
3854}
3855
3856/// Target-specific dag combine xforms for ISD::SRA
3857static SDValue PerformSRACombine(SDNode *N,
3858                                 TargetLowering::DAGCombinerInfo &DCI) {
3859
3860  SelectionDAG &DAG = DCI.DAG;
3861  SDLoc DL(N);
3862  EVT VT = N->getValueType(0);
3863
3864  // We're looking for an SRA/SHL pair which form an SBFX.
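  // For example (an i32 sketch): (sra (shl x, 24), 28) keeps bits [7:4] of x,
  // sign-extended, which becomes the SBFX node below with LSB = 4 and MSB = 7,
  // i.e. roughly "sbfx wD, wN, #4, #4".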
3865
3866  if (VT != MVT::i32 && VT != MVT::i64)
3867    return SDValue();
3868
3869  if (!isa<ConstantSDNode>(N->getOperand(1)))
3870    return SDValue();
3871
3872  uint64_t ExtraSignBits = N->getConstantOperandVal(1);
3873  SDValue Shift = N->getOperand(0);
3874
3875  if (Shift.getOpcode() != ISD::SHL)
3876    return SDValue();
3877
3878  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3879    return SDValue();
3880
3881  uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
3882  uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
3883  uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
3884
3885  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3886    return SDValue();
3887
3888  return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
3889                     DAG.getConstant(LSB, MVT::i64),
3890                     DAG.getConstant(LSB + Width - 1, MVT::i64));
3891}
3892
3893/// Check if this is a valid build_vector for the immediate operand of
3894/// a vector shift operation, where all the elements of the build_vector
3895/// must have the same constant integer value.
3896static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3897  // Ignore bit_converts.
3898  while (Op.getOpcode() == ISD::BITCAST)
3899    Op = Op.getOperand(0);
3900  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3901  APInt SplatBits, SplatUndef;
3902  unsigned SplatBitSize;
3903  bool HasAnyUndefs;
3904  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3905                                      HasAnyUndefs, ElementBits) ||
3906      SplatBitSize > ElementBits)
3907    return false;
3908  Cnt = SplatBits.getSExtValue();
3909  return true;
3910}
3911
3912/// Check if this is a valid build_vector for the immediate operand of
3913/// a vector shift left operation.  That value must be in the range:
3914/// 0 <= Value < ElementBits
3915static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
3916  assert(VT.isVector() && "vector shift count is not a vector type");
3917  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3918  if (!getVShiftImm(Op, ElementBits, Cnt))
3919    return false;
3920  return (Cnt >= 0 && Cnt < ElementBits);
3921}
3922
3923/// Check if this is a valid build_vector for the immediate operand of a
3924/// vector shift right operation. The value must be in the range:
3925///   1 <= Value <= ElementBits
3926static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
3927  assert(VT.isVector() && "vector shift count is not a vector type");
3928  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3929  if (!getVShiftImm(Op, ElementBits, Cnt))
3930    return false;
3931  return (Cnt >= 1 && Cnt <= ElementBits);
3932}
3933
3934static SDValue GenForSextInreg(SDNode *N,
3935                               TargetLowering::DAGCombinerInfo &DCI,
3936                               EVT SrcVT, EVT DestVT, EVT SubRegVT,
3937                               const int *Mask, SDValue Src) {
3938  SelectionDAG &DAG = DCI.DAG;
3939  SDValue Bitcast
3940    = DAG.getNode(ISD::BITCAST, SDLoc(N), SrcVT, Src);
3941  SDValue Sext
3942    = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), DestVT, Bitcast);
3943  SDValue ShuffleVec
3944    = DAG.getVectorShuffle(DestVT, SDLoc(N), Sext, DAG.getUNDEF(DestVT), Mask);
3945  SDValue ExtractSubreg
3946    = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N),
3947                SubRegVT, ShuffleVec,
3948                DAG.getTargetConstant(AArch64::sub_64, MVT::i32)), 0);
3949  return ExtractSubreg;
3950}
3951
3952/// Checks for vector shifts and lowers them.
3953static SDValue PerformShiftCombine(SDNode *N,
3954                                   TargetLowering::DAGCombinerInfo &DCI,
3955                                   const AArch64Subtarget *ST) {
3956  SelectionDAG &DAG = DCI.DAG;
3957  EVT VT = N->getValueType(0);
3958  if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
3959    return PerformSRACombine(N, DCI);
3960
3961  // We're looking for an SRA/SHL pair to help generate the instruction
3962  //   sshll  v0.8h, v0.8b, #0
3963  // The instruction SXTL is an alias of this instruction.
3964  //
3965  // For example, for a DAG like the one below,
3966  //   v2i32 = sra (v2i32 (shl v2i32, 16)), 16
3967  // we can transform it into
3968  //   v2i32 = EXTRACT_SUBREG
3969  //             (v4i32 (shuffle_vector
3970  //                       (v4i32 (sext (v4i16 (bitcast v2i32)))),
3971  //                       undef, (0, 2, u, u))),
3972  //             sub_64
3973  //
3974  // With this transformation we expect to generate "SSHLL + UZIP1".
3975  // Sometimes the UZIP1 can be optimized away by combining with other context.
3976  int64_t ShrCnt, ShlCnt;
3977  if (N->getOpcode() == ISD::SRA
3978      && (VT == MVT::v2i32 || VT == MVT::v4i16)
3979      && isVShiftRImm(N->getOperand(1), VT, ShrCnt)
3980      && N->getOperand(0).getOpcode() == ISD::SHL
3981      && isVShiftRImm(N->getOperand(0).getOperand(1), VT, ShlCnt)) {
3982    SDValue Src = N->getOperand(0).getOperand(0);
3983    if (VT == MVT::v2i32 && ShrCnt == 16 && ShlCnt == 16) {
3984      // sext_inreg(v2i32, v2i16)
3985      // We essentially only care about the Mask {0, 2, u, u}
3986      int Mask[4] = {0, 2, 4, 6};
3987      return GenForSextInreg(N, DCI, MVT::v4i16, MVT::v4i32, MVT::v2i32,
3988                             Mask, Src);
3989    }
3990    else if (VT == MVT::v2i32 && ShrCnt == 24 && ShlCnt == 24) {
3991      // sext_inreg(v2i32, v2i8)
3992      // We essentially only care about the Mask {0, u, 4, u, u, u, u, u}
3993      int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};
3994      return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v2i32,
3995                             Mask, Src);
3996    }
3997    else if (VT == MVT::v4i16 && ShrCnt == 8 && ShlCnt == 8) {
3998      // sext_inreg(v4i16, v4i8)
3999      // We essentially only care the Mask {0, 2, 4, 6, u, u, u, u, u, u, u, u}
4000      int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};
4001      return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v4i16,
4002                             Mask, Src);
4003    }
4004  }
4005
4006  // Nothing to be done for scalar shifts.
4007  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4008  if (!VT.isVector() || !TLI.isTypeLegal(VT))
4009    return SDValue();
4010
4011  assert(ST->hasNEON() && "unexpected vector shift");
4012  int64_t Cnt;
4013
4014  switch (N->getOpcode()) {
4015  default:
4016    llvm_unreachable("unexpected shift opcode");
4017
4018  case ISD::SHL:
4019    if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
4020      SDValue RHS =
4021          DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
4022                      DAG.getConstant(Cnt, MVT::i32));
4023      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
4024    }
4025    break;
4026
4027  case ISD::SRA:
4028  case ISD::SRL:
4029    if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
4030      SDValue RHS =
4031          DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
4032                      DAG.getConstant(Cnt, MVT::i32));
4033      return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
4034    }
4035    break;
4036  }
4037
4038  return SDValue();
4039}
4040
/// AArch64-specific DAG combining for Neon intrinsics.
4042static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
4043  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4044
4045  switch (IntNo) {
4046  default:
4047    // Don't do anything for most intrinsics.
4048    break;
4049
4050  case Intrinsic::arm_neon_vqshifts:
4051  case Intrinsic::arm_neon_vqshiftu:
4052    EVT VT = N->getOperand(1).getValueType();
4053    int64_t Cnt;
4054    if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
4055      break;
4056    unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
4057                             ? AArch64ISD::NEON_QSHLs
4058                             : AArch64ISD::NEON_QSHLu;
4059    return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
4060                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
4061  }
4062
4063  return SDValue();
4064}
4065
4066/// Target-specific DAG combine function for NEON load/store intrinsics
4067/// to merge base address updates.
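///
/// For example (an illustrative sketch, not a literal DAG dump):
///   %vec   = int_arm_neon_vld2(%addr, align)
///   %addr2 = add %addr, <number of bytes loaded>
/// becomes the single post-incremented load
///   {%vec, %addr2} = NEON_LD2_UPD(%addr, <increment>)
/// so that the writeback form of the instruction can be selected.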
4068static SDValue CombineBaseUpdate(SDNode *N,
4069                                 TargetLowering::DAGCombinerInfo &DCI) {
4070  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
4071    return SDValue();
4072
4073  SelectionDAG &DAG = DCI.DAG;
4074  bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
4075                      N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
4076  unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
4077  SDValue Addr = N->getOperand(AddrOpIdx);
4078
4079  // Search for a use of the address operand that is an increment.
4080  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
4081       UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
4082    SDNode *User = *UI;
4083    if (User->getOpcode() != ISD::ADD ||
4084        UI.getUse().getResNo() != Addr.getResNo())
4085      continue;
4086
4087    // Check that the add is independent of the load/store.  Otherwise, folding
4088    // it would create a cycle.
4089    if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
4090      continue;
4091
4092    // Find the new opcode for the updating load/store.
4093    bool isLoad = true;
4094    bool isLaneOp = false;
4095    unsigned NewOpc = 0;
4096    unsigned NumVecs = 0;
4097    if (isIntrinsic) {
4098      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4099      switch (IntNo) {
4100      default: llvm_unreachable("unexpected intrinsic for Neon base update");
4101      case Intrinsic::arm_neon_vld1:       NewOpc = AArch64ISD::NEON_LD1_UPD;
4102        NumVecs = 1; break;
4103      case Intrinsic::arm_neon_vld2:       NewOpc = AArch64ISD::NEON_LD2_UPD;
4104        NumVecs = 2; break;
4105      case Intrinsic::arm_neon_vld3:       NewOpc = AArch64ISD::NEON_LD3_UPD;
4106        NumVecs = 3; break;
4107      case Intrinsic::arm_neon_vld4:       NewOpc = AArch64ISD::NEON_LD4_UPD;
4108        NumVecs = 4; break;
4109      case Intrinsic::arm_neon_vst1:       NewOpc = AArch64ISD::NEON_ST1_UPD;
4110        NumVecs = 1; isLoad = false; break;
4111      case Intrinsic::arm_neon_vst2:       NewOpc = AArch64ISD::NEON_ST2_UPD;
4112        NumVecs = 2; isLoad = false; break;
4113      case Intrinsic::arm_neon_vst3:       NewOpc = AArch64ISD::NEON_ST3_UPD;
4114        NumVecs = 3; isLoad = false; break;
4115      case Intrinsic::arm_neon_vst4:       NewOpc = AArch64ISD::NEON_ST4_UPD;
4116        NumVecs = 4; isLoad = false; break;
4117      case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
4118        NumVecs = 2; break;
4119      case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
4120        NumVecs = 3; break;
4121      case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
4122        NumVecs = 4; break;
4123      case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
4124        NumVecs = 2; isLoad = false; break;
4125      case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
4126        NumVecs = 3; isLoad = false; break;
4127      case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
4128        NumVecs = 4; isLoad = false; break;
4129      case Intrinsic::arm_neon_vld2lane:   NewOpc = AArch64ISD::NEON_LD2LN_UPD;
4130        NumVecs = 2; isLaneOp = true; break;
4131      case Intrinsic::arm_neon_vld3lane:   NewOpc = AArch64ISD::NEON_LD3LN_UPD;
4132        NumVecs = 3; isLaneOp = true; break;
4133      case Intrinsic::arm_neon_vld4lane:   NewOpc = AArch64ISD::NEON_LD4LN_UPD;
4134        NumVecs = 4; isLaneOp = true; break;
4135      case Intrinsic::arm_neon_vst2lane:   NewOpc = AArch64ISD::NEON_ST2LN_UPD;
4136        NumVecs = 2; isLoad = false; isLaneOp = true; break;
4137      case Intrinsic::arm_neon_vst3lane:   NewOpc = AArch64ISD::NEON_ST3LN_UPD;
4138        NumVecs = 3; isLoad = false; isLaneOp = true; break;
4139      case Intrinsic::arm_neon_vst4lane:   NewOpc = AArch64ISD::NEON_ST4LN_UPD;
4140        NumVecs = 4; isLoad = false; isLaneOp = true; break;
4141      }
4142    } else {
4143      isLaneOp = true;
4144      switch (N->getOpcode()) {
4145      default: llvm_unreachable("unexpected opcode for Neon base update");
4146      case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
4147        NumVecs = 2; break;
4148      case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
4149        NumVecs = 3; break;
4150      case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
4151        NumVecs = 4; break;
4152      }
4153    }
4154
4155    // Find the size of memory referenced by the load/store.
4156    EVT VecTy;
4157    if (isLoad)
4158      VecTy = N->getValueType(0);
4159    else
4160      VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
4161    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
4162    if (isLaneOp)
4163      NumBytes /= VecTy.getVectorNumElements();
4164
4165    // If the increment is a constant, it must match the memory ref size.
4166    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
4167    if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
4168      uint32_t IncVal = CInc->getZExtValue();
4169      if (IncVal != NumBytes)
4170        continue;
4171      Inc = DAG.getTargetConstant(IncVal, MVT::i32);
4172    }
4173
4174    // Create the new updating load/store node.
4175    EVT Tys[6];
4176    unsigned NumResultVecs = (isLoad ? NumVecs : 0);
4177    unsigned n;
4178    for (n = 0; n < NumResultVecs; ++n)
4179      Tys[n] = VecTy;
4180    Tys[n++] = MVT::i64;
4181    Tys[n] = MVT::Other;
4182    SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
4183    SmallVector<SDValue, 8> Ops;
4184    Ops.push_back(N->getOperand(0)); // incoming chain
4185    Ops.push_back(N->getOperand(AddrOpIdx));
4186    Ops.push_back(Inc);
4187    for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
4188      Ops.push_back(N->getOperand(i));
4189    }
4190    MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
4191    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
4192                                           Ops.data(), Ops.size(),
4193                                           MemInt->getMemoryVT(),
4194                                           MemInt->getMemOperand());
4195
4196    // Update the uses.
4197    std::vector<SDValue> NewResults;
4198    for (unsigned i = 0; i < NumResultVecs; ++i) {
4199      NewResults.push_back(SDValue(UpdN.getNode(), i));
4200    }
4201    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
4202    DCI.CombineTo(N, NewResults);
4203    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
4204
4205    break;
4206  }
4207  return SDValue();
4208}
4209
/// For a NEON_VDUPLANE node N, check if its source operand is a vldN-lane
/// (N > 1) intrinsic, and if all the other uses of that intrinsic are also
/// NEON_VDUPLANEs. If so, combine them to a vldN-dup operation and return the
/// updated node; otherwise return an empty SDValue.
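///
/// For example (sketch): if every value use of
///   %vld = int_arm_neon_vld2lane(%ptr, %v0, %v1, <lane>, align)
/// is a NEON_VDUPLANE of that same lane, the loaded lanes are immediately
/// broadcast, so the whole group can instead be loaded as
///   %dup = NEON_LD2DUP(%ptr, align)
/// which loads once and duplicates into every lane directly.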
4213static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
4214  SelectionDAG &DAG = DCI.DAG;
4215  EVT VT = N->getValueType(0);
4216
4217  // Check if the VDUPLANE operand is a vldN-dup intrinsic.
4218  SDNode *VLD = N->getOperand(0).getNode();
4219  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
4220    return SDValue();
4221  unsigned NumVecs = 0;
4222  unsigned NewOpc = 0;
4223  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
4224  if (IntNo == Intrinsic::arm_neon_vld2lane) {
4225    NumVecs = 2;
4226    NewOpc = AArch64ISD::NEON_LD2DUP;
4227  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
4228    NumVecs = 3;
4229    NewOpc = AArch64ISD::NEON_LD3DUP;
4230  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
4231    NumVecs = 4;
4232    NewOpc = AArch64ISD::NEON_LD4DUP;
4233  } else {
4234    return SDValue();
4235  }
4236
4237  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
4238  // numbers match the load.
4239  unsigned VLDLaneNo =
4240      cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
4241  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
4242       UI != UE; ++UI) {
4243    // Ignore uses of the chain result.
4244    if (UI.getUse().getResNo() == NumVecs)
4245      continue;
4246    SDNode *User = *UI;
4247    if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
4248        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
4249      return SDValue();
4250  }
4251
4252  // Create the vldN-dup node.
4253  EVT Tys[5];
4254  unsigned n;
4255  for (n = 0; n < NumVecs; ++n)
4256    Tys[n] = VT;
4257  Tys[n] = MVT::Other;
4258  SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1);
4259  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
4260  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
4261  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
4262                                           VLDMemInt->getMemoryVT(),
4263                                           VLDMemInt->getMemOperand());
4264
4265  // Update the uses.
4266  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
4267       UI != UE; ++UI) {
4268    unsigned ResNo = UI.getUse().getResNo();
4269    // Ignore uses of the chain result.
4270    if (ResNo == NumVecs)
4271      continue;
4272    SDNode *User = *UI;
4273    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
4274  }
4275
4276  // Now the vldN-lane intrinsic is dead except for its chain result.
4277  // Update uses of the chain.
4278  std::vector<SDValue> VLDDupResults;
4279  for (unsigned n = 0; n < NumVecs; ++n)
4280    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
4281  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
4282  DCI.CombineTo(VLD, VLDDupResults);
4283
4284  return SDValue(N, 0);
4285}
4286
// v1i1 setcc ->
//     v1i1 (bitcast (i1 setcc (extract_vector_elt, extract_vector_elt)))
// FIXME: Currently the type legalizer can't handle SETCC with v1i1 as the
// result type. Once it can legalize "v1i1 SETCC" correctly, there is no need
// to combine such a SETCC.
4291static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
4292  EVT ResVT = N->getValueType(0);
4293
4294  if (!ResVT.isVector() || ResVT.getVectorNumElements() != 1 ||
4295      ResVT.getVectorElementType() != MVT::i1)
4296    return SDValue();
4297
4298  SDValue LHS = N->getOperand(0);
4299  SDValue RHS = N->getOperand(1);
4300  EVT CmpVT = LHS.getValueType();
4301  LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
4302                    CmpVT.getVectorElementType(), LHS,
4303                    DAG.getConstant(0, MVT::i64));
4304  RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
4305                    CmpVT.getVectorElementType(), RHS,
4306                    DAG.getConstant(0, MVT::i64));
4307  SDValue SetCC =
4308      DAG.getSetCC(SDLoc(N), MVT::i1, LHS, RHS,
4309                   cast<CondCodeSDNode>(N->getOperand(2))->get());
4310  return DAG.getNode(ISD::BITCAST, SDLoc(N), ResVT, SetCC);
4311}
4312
// vselect (v1i1 setcc) ->
//     vselect (v1iXX setcc)  (XX is the size of the compared operand type)
// FIXME: Currently the type legalizer can't handle VSELECT with v1i1 as the
// condition. Once it can legalize "VSELECT v1i1" correctly, there is no need
// to combine such a VSELECT.
4318static SDValue PerformVSelectCombine(SDNode *N, SelectionDAG &DAG) {
4319  SDValue N0 = N->getOperand(0);
4320  EVT CCVT = N0.getValueType();
4321
4322  if (N0.getOpcode() != ISD::SETCC || CCVT.getVectorNumElements() != 1 ||
4323      CCVT.getVectorElementType() != MVT::i1)
4324    return SDValue();
4325
4326  EVT ResVT = N->getValueType(0);
4327  EVT CmpVT = N0.getOperand(0).getValueType();
4328  // Only combine when the result type is of the same size as the compared
4329  // operands.
4330  if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
4331    return SDValue();
4332
4333  SDValue IfTrue = N->getOperand(1);
4334  SDValue IfFalse = N->getOperand(2);
4335  SDValue SetCC =
4336      DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
4337                   N0.getOperand(0), N0.getOperand(1),
4338                   cast<CondCodeSDNode>(N0.getOperand(2))->get());
4339  return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
4340                     IfTrue, IfFalse);
4341}
4342
4343// sign_extend (extract_vector_elt (v1i1 setcc)) ->
4344//     extract_vector_elt (v1iXX setcc)
4345// (XX is the size of the compared operand type)
4346static SDValue PerformSignExtendCombine(SDNode *N, SelectionDAG &DAG) {
4347  SDValue N0 = N->getOperand(0);
  // Check the opcode of N0 before looking at its operands: only an
  // extract_vector_elt of a setcc is of interest here.
  if (N0.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  SDValue Vec = N0.getOperand(0);
  if (Vec.getOpcode() != ISD::SETCC)
    return SDValue();
4353
4354  EVT ResVT = N->getValueType(0);
4355  EVT CmpVT = Vec.getOperand(0).getValueType();
4356  // Only optimize when the result type is of the same size as the element
4357  // type of the compared operand.
4358  if (ResVT.getSizeInBits() != CmpVT.getVectorElementType().getSizeInBits())
4359    return SDValue();
4360
4361  SDValue Lane = N0.getOperand(1);
4362  SDValue SetCC =
4363      DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
4364                   Vec.getOperand(0), Vec.getOperand(1),
4365                   cast<CondCodeSDNode>(Vec.getOperand(2))->get());
4366  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ResVT,
4367                     SetCC, Lane);
4368}
4369
4370SDValue
4371AArch64TargetLowering::PerformDAGCombine(SDNode *N,
4372                                         DAGCombinerInfo &DCI) const {
4373  switch (N->getOpcode()) {
4374  default: break;
4375  case ISD::AND: return PerformANDCombine(N, DCI);
4376  case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
4377  case ISD::SHL:
4378  case ISD::SRA:
4379  case ISD::SRL:
4380    return PerformShiftCombine(N, DCI, getSubtarget());
4381  case ISD::SETCC: return PerformSETCCCombine(N, DCI.DAG);
4382  case ISD::VSELECT: return PerformVSelectCombine(N, DCI.DAG);
4383  case ISD::SIGN_EXTEND: return PerformSignExtendCombine(N, DCI.DAG);
4384  case ISD::INTRINSIC_WO_CHAIN:
4385    return PerformIntrinsicCombine(N, DCI.DAG);
4386  case AArch64ISD::NEON_VDUPLANE:
4387    return CombineVLDDUP(N, DCI);
4388  case AArch64ISD::NEON_LD2DUP:
4389  case AArch64ISD::NEON_LD3DUP:
4390  case AArch64ISD::NEON_LD4DUP:
4391    return CombineBaseUpdate(N, DCI);
4392  case ISD::INTRINSIC_VOID:
4393  case ISD::INTRINSIC_W_CHAIN:
4394    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
4395    case Intrinsic::arm_neon_vld1:
4396    case Intrinsic::arm_neon_vld2:
4397    case Intrinsic::arm_neon_vld3:
4398    case Intrinsic::arm_neon_vld4:
4399    case Intrinsic::arm_neon_vst1:
4400    case Intrinsic::arm_neon_vst2:
4401    case Intrinsic::arm_neon_vst3:
4402    case Intrinsic::arm_neon_vst4:
4403    case Intrinsic::arm_neon_vld2lane:
4404    case Intrinsic::arm_neon_vld3lane:
4405    case Intrinsic::arm_neon_vld4lane:
4406    case Intrinsic::aarch64_neon_vld1x2:
4407    case Intrinsic::aarch64_neon_vld1x3:
4408    case Intrinsic::aarch64_neon_vld1x4:
4409    case Intrinsic::aarch64_neon_vst1x2:
4410    case Intrinsic::aarch64_neon_vst1x3:
4411    case Intrinsic::aarch64_neon_vst1x4:
4412    case Intrinsic::arm_neon_vst2lane:
4413    case Intrinsic::arm_neon_vst3lane:
4414    case Intrinsic::arm_neon_vst4lane:
4415      return CombineBaseUpdate(N, DCI);
4416    default:
4417      break;
4418    }
4419  }
4420  return SDValue();
4421}
4422
4423bool
4424AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
4425  VT = VT.getScalarType();
4426
4427  if (!VT.isSimple())
4428    return false;
4429
4430  switch (VT.getSimpleVT().SimpleTy) {
4431  case MVT::f16:
4432  case MVT::f32:
4433  case MVT::f64:
4434    return true;
4435  case MVT::f128:
4436    return false;
4437  default:
4438    break;
4439  }
4440
4441  return false;
4442}

// Check whether a shuffle_vector can be represented as a concat_vector.
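//
// For example (illustrative): a v4i32 shuffle of two v2i32 inputs with the
// mask <0, 1, 2, 3> is simply concat_vectors(V0, V1).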
4444bool AArch64TargetLowering::isConcatVector(SDValue Op, SelectionDAG &DAG,
4445                                           SDValue V0, SDValue V1,
4446                                           const int *Mask,
4447                                           SDValue &Res) const {
4448  SDLoc DL(Op);
4449  EVT VT = Op.getValueType();
4450  if (VT.getSizeInBits() != 128)
4451    return false;
4452  if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() ||
4453      VT.getVectorElementType() != V1.getValueType().getVectorElementType())
4454    return false;
4455
4456  unsigned NumElts = VT.getVectorNumElements();
  bool isConcat = true;
4458  bool splitV0 = false;
4459  if (V0.getValueType().getSizeInBits() == 128)
4460    splitV0 = true;
4461
4462  for (int I = 0, E = NumElts / 2; I != E; I++) {
4463    if (Mask[I] != I) {
      isConcat = false;
4465      break;
4466    }
4467  }
4468
  if (isConcat) {
4470    int offset = NumElts / 2;
4471    for (int I = NumElts / 2, E = NumElts; I != E; I++) {
4472      if (Mask[I] != I + splitV0 * offset) {
        isConcat = false;
4474        break;
4475      }
4476    }
4477  }
4478
  if (isConcat) {
4480    EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
4481                                  NumElts / 2);
4482    if (splitV0) {
4483      V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
4484                       DAG.getConstant(0, MVT::i64));
4485    }
4486    if (V1.getValueType().getSizeInBits() == 128) {
4487      V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
4488                       DAG.getConstant(0, MVT::i64));
4489    }
4490    Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
4491    return true;
4492  }
4493  return false;
4494}
4495
// Check whether a BUILD_VECTOR can be represented as a VECTOR_SHUFFLE.
// The resulting shuffle vector may not be legalized, so the length of its
// operands and the length of the result may not be equal.
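//
// For example (a sketch, assuming A and B are both v4i32): the v4i32
// BUILD_VECTOR of
//   (extract_elt A, 0), (extract_elt A, 2), (extract_elt B, 1),
//   (extract_elt B, 3)
// is recognised with V0 = A, V1 = B and Mask = <0, 2, 5, 7>, where V1 lanes
// are numbered after the V0 lanes.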
4499bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
4500                                                 SDValue &V0, SDValue &V1,
4501                                                 int *Mask) const {
4502  SDLoc DL(Op);
4503  EVT VT = Op.getValueType();
4504  unsigned NumElts = VT.getVectorNumElements();
4505  unsigned V0NumElts = 0;
4506
  // Check that all elements are extracted from at most two vectors.
4508  for (unsigned i = 0; i < NumElts; ++i) {
4509    SDValue Elt = Op.getOperand(i);
4510    if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4511        Elt.getOperand(0).getValueType().getVectorElementType() !=
4512            VT.getVectorElementType())
4513      return false;
4514
4515    if (V0.getNode() == 0) {
4516      V0 = Elt.getOperand(0);
4517      V0NumElts = V0.getValueType().getVectorNumElements();
4518    }
4519    if (Elt.getOperand(0) == V0) {
4520      Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
4521      continue;
4522    } else if (V1.getNode() == 0) {
4523      V1 = Elt.getOperand(0);
4524    }
4525    if (Elt.getOperand(0) == V1) {
4526      unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
4527      Mask[i] = (Lane + V0NumElts);
4528      continue;
4529    } else {
4530      return false;
4531    }
4532  }
4533  return true;
4534}
4535
/// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which return two
/// i64 values and take a 2 x i64 value to shift plus a shift amount.
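///
/// The expansion follows the usual double-word shift pattern: for shift
/// amounts below 64 the low half is (Lo >> Amt) | (Hi << (64 - Amt)) and the
/// high half is Hi shifted by Amt; for larger amounts the two halves are
/// selected with AArch64ISD::SELECT_CC nodes keyed on (Amt - 64) >= 0.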
4538SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op,
4539                                                SelectionDAG &DAG) const {
4540  assert(Op.getNumOperands() == 3 && "Not a quad-shift!");
4541  EVT VT = Op.getValueType();
4542  unsigned VTBits = VT.getSizeInBits();
4543  SDLoc dl(Op);
4544  SDValue ShOpLo = Op.getOperand(0);
4545  SDValue ShOpHi = Op.getOperand(1);
4546  SDValue ShAmt  = Op.getOperand(2);
4547  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
4548
4549  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
4550  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
4551                                 DAG.getConstant(VTBits, MVT::i64), ShAmt);
4552  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
4553  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
4554                                   DAG.getConstant(VTBits, MVT::i64));
4555  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
4556  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4557  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4558  SDValue Tmp3 = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
4559
4560  SDValue A64cc;
4561  SDValue CmpOp = getSelectableIntSetCC(ExtraShAmt,
4562                                        DAG.getConstant(0, MVT::i64),
4563                                        ISD::SETGE, A64cc,
4564                                        DAG, dl);
4565
4566  SDValue Hi = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4567                           DAG.getConstant(0, Tmp3.getValueType()), Tmp3,
4568                           A64cc);
4569  SDValue Lo = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4570                           TrueVal, FalseVal, A64cc);
4571
4572  SDValue Ops[2] = { Lo, Hi };
4573  return DAG.getMergeValues(Ops, 2, dl);
4574}
4575
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i64 values and takes a 2 x i64 value to shift plus a shift amount.
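///
/// The expansion mirrors LowerShiftRightParts above, with the roles of the
/// low and high halves swapped.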
4578SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op,
4579                                               SelectionDAG &DAG) const {
4580  assert(Op.getNumOperands() == 3 && "Not a quad-shift!");
4581  EVT VT = Op.getValueType();
4582  unsigned VTBits = VT.getSizeInBits();
4583  SDLoc dl(Op);
4584  SDValue ShOpLo = Op.getOperand(0);
4585  SDValue ShOpHi = Op.getOperand(1);
4586  SDValue ShAmt  = Op.getOperand(2);
4587
4588  assert(Op.getOpcode() == ISD::SHL_PARTS);
4589  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
4590                                 DAG.getConstant(VTBits, MVT::i64), ShAmt);
4591  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
4592  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
4593                                   DAG.getConstant(VTBits, MVT::i64));
4594  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
4595  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
4596  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4597  SDValue Tmp4 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
4598
4599  SDValue A64cc;
4600  SDValue CmpOp = getSelectableIntSetCC(ExtraShAmt,
4601                                        DAG.getConstant(0, MVT::i64),
4602                                        ISD::SETGE, A64cc,
4603                                        DAG, dl);
4604
4605  SDValue Lo = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4606                           DAG.getConstant(0, Tmp4.getValueType()), Tmp4,
4607                           A64cc);
4608  SDValue Hi = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
4609                           Tmp3, FalseVal, A64cc);
4610
4611  SDValue Ops[2] = { Lo, Hi };
4612  return DAG.getMergeValues(Ops, 2, dl);
4613}
4614
4615// If this is a case we can't handle, return null and let the default
4616// expansion code take care of it.
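//
// The lowering below tries, in order: a MOVI/MVNI/FMOV immediate for constant
// splats, a DUP/DUPLANE (plus lane inserts) when one value dominates the
// lanes, a shuffle or concat when the elements are extracted from existing
// vectors, and finally a plain sequence of INSERT_VECTOR_ELT nodes.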
4617SDValue
4618AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
4619                                         const AArch64Subtarget *ST) const {
4620
4621  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
4622  SDLoc DL(Op);
4623  EVT VT = Op.getValueType();
4624
4625  APInt SplatBits, SplatUndef;
4626  unsigned SplatBitSize;
4627  bool HasAnyUndefs;
4628
  bool UseNeonMov = VT.getSizeInBits() >= 64;
4630
4631  // Note we favor lowering MOVI over MVNI.
4632  // This has implications on the definition of patterns in TableGen to select
4633  // BIC immediate instructions but not ORR immediate instructions.
4634  // If this lowering order is changed, TableGen patterns for BIC immediate and
4635  // ORR immediate instructions have to be updated.
4636  if (UseNeonMov &&
4637      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4638    if (SplatBitSize <= 64) {
4639      // First attempt to use vector immediate-form MOVI
4640      EVT NeonMovVT;
4641      unsigned Imm = 0;
4642      unsigned OpCmode = 0;
4643
4644      if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
4645                            SplatBitSize, DAG, VT.is128BitVector(),
4646                            Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
4647        SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
4648        SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
4649
4650        if (ImmVal.getNode() && OpCmodeVal.getNode()) {
4651          SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
4652                                        ImmVal, OpCmodeVal);
4653          return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
4654        }
4655      }
4656
4657      // Then attempt to use vector immediate-form MVNI
4658      uint64_t NegatedImm = (~SplatBits).getZExtValue();
4659      if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
4660                            DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
4661                            Imm, OpCmode)) {
4662        SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
4663        SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
4664        if (ImmVal.getNode() && OpCmodeVal.getNode()) {
4665          SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
4666                                        ImmVal, OpCmodeVal);
4667          return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
4668        }
4669      }
4670
4671      // Attempt to use vector immediate-form FMOV
4672      if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
4673          (VT == MVT::v2f64 && SplatBitSize == 64)) {
4674        APFloat RealVal(
4675            SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
4676            SplatBits);
4677        uint32_t ImmVal;
4678        if (A64Imms::isFPImm(RealVal, ImmVal)) {
4679          SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
4680          return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
4681        }
4682      }
4683    }
4684  }
4685
4686  unsigned NumElts = VT.getVectorNumElements();
4687  bool isOnlyLowElement = true;
4688  bool usesOnlyOneValue = true;
4689  bool hasDominantValue = false;
4690  bool isConstant = true;
4691
4692  // Map of the number of times a particular SDValue appears in the
4693  // element list.
4694  DenseMap<SDValue, unsigned> ValueCounts;
4695  SDValue Value;
4696  for (unsigned i = 0; i < NumElts; ++i) {
4697    SDValue V = Op.getOperand(i);
4698    if (V.getOpcode() == ISD::UNDEF)
4699      continue;
4700    if (i > 0)
4701      isOnlyLowElement = false;
4702    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
4703      isConstant = false;
4704
4705    ValueCounts.insert(std::make_pair(V, 0));
4706    unsigned &Count = ValueCounts[V];
4707
4708    // Is this value dominant? (takes up more than half of the lanes)
4709    if (++Count > (NumElts / 2)) {
4710      hasDominantValue = true;
4711      Value = V;
4712    }
4713  }
4714  if (ValueCounts.size() != 1)
4715    usesOnlyOneValue = false;
4716  if (!Value.getNode() && ValueCounts.size() > 0)
4717    Value = ValueCounts.begin()->first;
4718
4719  if (ValueCounts.size() == 0)
4720    return DAG.getUNDEF(VT);
4721
4722  if (isOnlyLowElement)
4723    return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4724
4725  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4726  if (hasDominantValue && EltSize <= 64) {
4727    // Use VDUP for non-constant splats.
4728    if (!isConstant) {
4729      SDValue N;
4730
4731      // If we are DUPing a value that comes directly from a vector, we could
4732      // just use DUPLANE. We can only do this if the lane being extracted
4733      // is at a constant index, as the DUP from lane instructions only have
4734      // constant-index forms.
4735      //
      // If there is a TRUNCATE between EXTRACT_VECTOR_ELT and DUP, we can
      // remove the TRUNCATE for DUPLANE by updating the source vector to
      // an appropriate vector type and lane index.
      //
      // FIXME: for now v1i8, v1i16 and v1i32 are legal vector types; if they
      // are no longer legal, there is no need to check that the source type
      // size in bits is larger than 64.
4743      SDValue V = Value;
4744      if (Value->getOpcode() == ISD::TRUNCATE)
4745        V = Value->getOperand(0);
4746      if (V->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4747          isa<ConstantSDNode>(V->getOperand(1)) &&
4748          V->getOperand(0).getValueType().getSizeInBits() >= 64) {
4749
        // If the element size of the source vector is larger than the DUPLANE
        // element size, we can do the transformation by:
        // 1) bitcasting the source register to a vector with smaller elements
        // 2) multiplying the lane index by SrcEltSize/ResEltSize
4754        // For example, we can lower
4755        //     "v8i16 vdup_lane(v4i32, 1)"
4756        // to be
4757        //     "v8i16 vdup_lane(v8i16 bitcast(v4i32), 2)".
4758        SDValue SrcVec = V->getOperand(0);
4759        unsigned SrcEltSize =
4760            SrcVec.getValueType().getVectorElementType().getSizeInBits();
4761        unsigned ResEltSize = VT.getVectorElementType().getSizeInBits();
4762        if (SrcEltSize > ResEltSize) {
4763          assert((SrcEltSize % ResEltSize == 0) && "Invalid element size");
4764          SDValue BitCast;
4765          unsigned SrcSize = SrcVec.getValueType().getSizeInBits();
4766          unsigned ResSize = VT.getSizeInBits();
4767
4768          if (SrcSize > ResSize) {
4769            assert((SrcSize % ResSize == 0) && "Invalid vector size");
4770            EVT CastVT =
4771                EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
4772                                 SrcSize / ResEltSize);
4773            BitCast = DAG.getNode(ISD::BITCAST, DL, CastVT, SrcVec);
4774          } else {
4775            assert((SrcSize == ResSize) && "Invalid vector size of source vec");
4776            BitCast = DAG.getNode(ISD::BITCAST, DL, VT, SrcVec);
4777          }
4778
4779          unsigned LaneIdx = V->getConstantOperandVal(1);
4780          SDValue Lane =
4781              DAG.getConstant((SrcEltSize / ResEltSize) * LaneIdx, MVT::i64);
4782          N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, BitCast, Lane);
4783        } else {
4784          assert((SrcEltSize == ResEltSize) &&
4785                 "Invalid element size of source vec");
4786          N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, V->getOperand(0),
4787                          V->getOperand(1));
4788        }
4789      } else
4790        N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4791
4792      if (!usesOnlyOneValue) {
4793        // The dominant value was splatted as 'N', but we now have to insert
4794        // all differing elements.
4795        for (unsigned I = 0; I < NumElts; ++I) {
4796          if (Op.getOperand(I) == Value)
4797            continue;
4798          SmallVector<SDValue, 3> Ops;
4799          Ops.push_back(N);
4800          Ops.push_back(Op.getOperand(I));
4801          Ops.push_back(DAG.getConstant(I, MVT::i64));
4802          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
4803        }
4804      }
4805      return N;
4806    }
4807    if (usesOnlyOneValue && isConstant) {
4808      return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4809    }
4810  }
4811  // If all elements are constants and the case above didn't get hit, fall back
4812  // to the default expansion, which will generate a load from the constant
4813  // pool.
4814  if (isConstant)
4815    return SDValue();
4816
  // Try to lower this the same way a shuffle_vector would be lowered.
4818  SDValue V0, V1;
4819  int Mask[16];
4820  if (isKnownShuffleVector(Op, DAG, V0, V1, Mask)) {
4821    unsigned V0NumElts = V0.getValueType().getVectorNumElements();
4822    if (!V1.getNode() && V0NumElts == NumElts * 2) {
4823      V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
4824                       DAG.getConstant(NumElts, MVT::i64));
4825      V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
4826                       DAG.getConstant(0, MVT::i64));
4827      V0NumElts = V0.getValueType().getVectorNumElements();
4828    }
4829
4830    if (V1.getNode() && NumElts == V0NumElts &&
4831        V0NumElts == V1.getValueType().getVectorNumElements()) {
4832      SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
4833      if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE)
4834        return Shuffle;
4835      else
4836        return LowerVECTOR_SHUFFLE(Shuffle, DAG);
4837    } else {
4838      SDValue Res;
4839      if (isConcatVector(Op, DAG, V0, V1, Mask, Res))
4840        return Res;
4841    }
4842  }
4843
  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target); for everything else it's
  // materialization element by element on the stack followed by a load.
4850  if (!isConstant && !usesOnlyOneValue) {
4851    SDValue Vec = DAG.getUNDEF(VT);
4852    for (unsigned i = 0 ; i < NumElts; ++i) {
4853      SDValue V = Op.getOperand(i);
4854      if (V.getOpcode() == ISD::UNDEF)
4855        continue;
4856      SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
4857      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
4858    }
4859    return Vec;
4860  }
4861  return SDValue();
4862}
4863
4864/// isREVMask - Check if a vector shuffle corresponds to a REV
4865/// instruction with the specified blocksize.  (The order of the elements
4866/// within each block of the vector is reversed.)
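///
/// For example, <3, 2, 1, 0, 7, 6, 5, 4> is the v8i16 mask for REV64: each
/// 64-bit block holds four 16-bit elements, listed in reverse order.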
4867static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
4868  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
4869         "Only possible block sizes for REV are: 16, 32, 64");
4870
4871  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4872  if (EltSz == 64)
4873    return false;
4874
4875  unsigned NumElts = VT.getVectorNumElements();
4876  unsigned BlockElts = M[0] + 1;
4877  // If the first shuffle index is UNDEF, be optimistic.
4878  if (M[0] < 0)
4879    BlockElts = BlockSize / EltSz;
4880
4881  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
4882    return false;
4883
4884  for (unsigned i = 0; i < NumElts; ++i) {
4885    if (M[i] < 0)
4886      continue; // ignore UNDEF indices
4887    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
4888      return false;
4889  }
4890
4891  return true;
4892}
4893
// isPermuteMask - Check whether the vector shuffle mask matches a UZP, ZIP or
// TRN instruction.
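//
// For example, with two v4i32 operands the masks are:
//   UZP1 <- <0, 2, 4, 6>   ZIP1 <- <0, 4, 1, 5>   TRN1 <- <0, 4, 2, 6>
//   UZP2 <- <1, 3, 5, 7>   ZIP2 <- <2, 6, 3, 7>   TRN2 <- <1, 5, 3, 7>
// (when V2 is undef, indices >= 4 wrap back onto the first operand).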
4896static unsigned isPermuteMask(ArrayRef<int> M, EVT VT, bool isV2undef) {
4897  unsigned NumElts = VT.getVectorNumElements();
4898  if (NumElts < 4)
4899    return 0;
4900
4901  bool ismatch = true;
4902
4903  // Check UZP1
4904  for (unsigned i = 0; i < NumElts; ++i) {
4905    unsigned answer = i * 2;
4906    if (isV2undef && answer >= NumElts)
4907      answer -= NumElts;
4908    if (M[i] != -1 && (unsigned)M[i] != answer) {
4909      ismatch = false;
4910      break;
4911    }
4912  }
4913  if (ismatch)
4914    return AArch64ISD::NEON_UZP1;
4915
4916  // Check UZP2
4917  ismatch = true;
4918  for (unsigned i = 0; i < NumElts; ++i) {
4919    unsigned answer = i * 2 + 1;
4920    if (isV2undef && answer >= NumElts)
4921      answer -= NumElts;
4922    if (M[i] != -1 && (unsigned)M[i] != answer) {
4923      ismatch = false;
4924      break;
4925    }
4926  }
4927  if (ismatch)
4928    return AArch64ISD::NEON_UZP2;
4929
4930  // Check ZIP1
4931  ismatch = true;
4932  for (unsigned i = 0; i < NumElts; ++i) {
4933    unsigned answer = i / 2 + NumElts * (i % 2);
4934    if (isV2undef && answer >= NumElts)
4935      answer -= NumElts;
4936    if (M[i] != -1 && (unsigned)M[i] != answer) {
4937      ismatch = false;
4938      break;
4939    }
4940  }
4941  if (ismatch)
4942    return AArch64ISD::NEON_ZIP1;
4943
4944  // Check ZIP2
4945  ismatch = true;
4946  for (unsigned i = 0; i < NumElts; ++i) {
4947    unsigned answer = (NumElts + i) / 2 + NumElts * (i % 2);
4948    if (isV2undef && answer >= NumElts)
4949      answer -= NumElts;
4950    if (M[i] != -1 && (unsigned)M[i] != answer) {
4951      ismatch = false;
4952      break;
4953    }
4954  }
4955  if (ismatch)
4956    return AArch64ISD::NEON_ZIP2;
4957
4958  // Check TRN1
4959  ismatch = true;
4960  for (unsigned i = 0; i < NumElts; ++i) {
4961    unsigned answer = i + (NumElts - 1) * (i % 2);
4962    if (isV2undef && answer >= NumElts)
4963      answer -= NumElts;
4964    if (M[i] != -1 && (unsigned)M[i] != answer) {
4965      ismatch = false;
4966      break;
4967    }
4968  }
4969  if (ismatch)
4970    return AArch64ISD::NEON_TRN1;
4971
4972  // Check TRN2
4973  ismatch = true;
4974  for (unsigned i = 0; i < NumElts; ++i) {
4975    unsigned answer = 1 + i + (NumElts - 1) * (i % 2);
4976    if (isV2undef && answer >= NumElts)
4977      answer -= NumElts;
4978    if (M[i] != -1 && (unsigned)M[i] != answer) {
4979      ismatch = false;
4980      break;
4981    }
4982  }
4983  if (ismatch)
4984    return AArch64ISD::NEON_TRN2;
4985
4986  return 0;
4987}
4988
4989SDValue
4990AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
4991                                           SelectionDAG &DAG) const {
4992  SDValue V1 = Op.getOperand(0);
4993  SDValue V2 = Op.getOperand(1);
4994  SDLoc dl(Op);
4995  EVT VT = Op.getValueType();
4996  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
4997
4998  // Convert shuffles that are directly supported on NEON to target-specific
4999  // DAG nodes, instead of keeping them as shuffles and matching them again
5000  // during code selection.  This is more efficient and avoids the possibility
5001  // of inconsistencies between legalization and selection.
5002  ArrayRef<int> ShuffleMask = SVN->getMask();
5003
5004  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
5005  if (EltSize > 64)
5006    return SDValue();
5007
5008  if (isREVMask(ShuffleMask, VT, 64))
5009    return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
5010  if (isREVMask(ShuffleMask, VT, 32))
5011    return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
5012  if (isREVMask(ShuffleMask, VT, 16))
5013    return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);
5014
5015  unsigned ISDNo;
5016  if (V2.getOpcode() == ISD::UNDEF)
5017    ISDNo = isPermuteMask(ShuffleMask, VT, true);
5018  else
5019    ISDNo = isPermuteMask(ShuffleMask, VT, false);
5020
5021  if (ISDNo) {
5022    if (V2.getOpcode() == ISD::UNDEF)
5023      return DAG.getNode(ISDNo, dl, VT, V1, V1);
5024    else
5025      return DAG.getNode(ISDNo, dl, VT, V1, V2);
5026  }
5027
5028  SDValue Res;
5029  if (isConcatVector(Op, DAG, V1, V2, &ShuffleMask[0], Res))
5030    return Res;
5031
  // If the elements of the shuffle mask are all the same constant, we can
  // transform it into either NEON_VDUP or NEON_VDUPLANE.
5034  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
5035    int Lane = SVN->getSplatIndex();
5036    // If this is undef splat, generate it via "just" vdup, if possible.
5037    if (Lane == -1) Lane = 0;
5038
5039    // Test if V1 is a SCALAR_TO_VECTOR.
5040    if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
5041      return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
5042    }
5043    // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
5044    if (V1.getOpcode() == ISD::BUILD_VECTOR) {
5045      bool IsScalarToVector = true;
5046      for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
5047        if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
5048            i != (unsigned)Lane) {
5049          IsScalarToVector = false;
5050          break;
5051        }
5052      if (IsScalarToVector)
5053        return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
5054                           V1.getOperand(Lane));
5055    }
5056
5057    // Test if V1 is a EXTRACT_SUBVECTOR.
5058    if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
5059      int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
5060      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
5061                         DAG.getConstant(Lane + ExtLane, MVT::i64));
5062    }
5063    // Test if V1 is a CONCAT_VECTORS.
5064    if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
5065        V1.getOperand(1).getOpcode() == ISD::UNDEF) {
5066      SDValue Op0 = V1.getOperand(0);
5067      assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
5068             "Invalid vector lane access");
5069      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
5070                         DAG.getConstant(Lane, MVT::i64));
5071    }
5072
5073    return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
5074                       DAG.getConstant(Lane, MVT::i64));
5075  }
5076
5077  int Length = ShuffleMask.size();
5078  int V1EltNum = V1.getValueType().getVectorNumElements();
5079
  // If the number of V1 elements is the same as the number of shuffle mask
  // elements and the shuffle mask values are sequential, we can transform
  // it into NEON_VEXTRACT.
5083  if (V1EltNum == Length) {
5084    // Check if the shuffle mask is sequential.
5085    int SkipUndef = 0;
5086    while (ShuffleMask[SkipUndef] == -1) {
5087      SkipUndef++;
5088    }
5089    int CurMask = ShuffleMask[SkipUndef];
5090    if (CurMask >= SkipUndef) {
5091      bool IsSequential = true;
5092      for (int I = SkipUndef; I < Length; ++I) {
5093        if (ShuffleMask[I] != -1 && ShuffleMask[I] != CurMask) {
5094          IsSequential = false;
5095          break;
5096        }
5097        CurMask++;
5098      }
5099      if (IsSequential) {
5100        assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
5101        unsigned VecSize = EltSize * V1EltNum;
5102        unsigned Index = (EltSize / 8) * (ShuffleMask[SkipUndef] - SkipUndef);
5103        if (VecSize == 64 || VecSize == 128)
5104          return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
5105                             DAG.getConstant(Index, MVT::i64));
5106      }
5107    }
5108  }
5109
  // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
  // insert by element from V2 into V1.
  // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be a
  // better choice to insert into than V1, as fewer inserts are needed. So we
  // count the elements to be inserted for both V1 and V2, and select the one
  // needing fewer inserts as the insert target.

  // Collect the elements that need to be inserted and their indices.
5118  SmallVector<int, 8> NV1Elt;
5119  SmallVector<int, 8> N1Index;
5120  SmallVector<int, 8> NV2Elt;
5121  SmallVector<int, 8> N2Index;
5122  for (int I = 0; I != Length; ++I) {
5123    if (ShuffleMask[I] != I) {
5124      NV1Elt.push_back(ShuffleMask[I]);
5125      N1Index.push_back(I);
5126    }
5127  }
5128  for (int I = 0; I != Length; ++I) {
5129    if (ShuffleMask[I] != (I + V1EltNum)) {
5130      NV2Elt.push_back(ShuffleMask[I]);
5131      N2Index.push_back(I);
5132    }
5133  }
5134
  // Decide which vector to insert into. If all lanes mismatch for both V1 and
  // V2, neither is used as the base and we build on UNDEF instead.
5137  SDValue InsV = V1;
5138  SmallVector<int, 8> InsMasks = NV1Elt;
5139  SmallVector<int, 8> InsIndex = N1Index;
5140  if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
5141    if (NV1Elt.size() > NV2Elt.size()) {
5142      InsV = V2;
5143      InsMasks = NV2Elt;
5144      InsIndex = N2Index;
5145    }
5146  } else {
5147    InsV = DAG.getNode(ISD::UNDEF, dl, VT);
5148  }
5149
5150  for (int I = 0, E = InsMasks.size(); I != E; ++I) {
5151    SDValue ExtV = V1;
5152    int Mask = InsMasks[I];
5153    if (Mask >= V1EltNum) {
5154      ExtV = V2;
5155      Mask -= V1EltNum;
5156    }
    // Any value type smaller than i32 is illegal in AArch64, and this
    // lowering function is called after the legalize pass, so we need to
    // legalize the result here.
5160    EVT EltVT;
5161    if (VT.getVectorElementType().isFloatingPoint())
5162      EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
5163    else
5164      EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;
5165
5166    if (Mask >= 0) {
5167      ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
5168                         DAG.getConstant(Mask, MVT::i64));
5169      InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
5170                         DAG.getConstant(InsIndex[I], MVT::i64));
5171    }
5172  }
5173  return InsV;
5174}
5175
5176AArch64TargetLowering::ConstraintType
5177AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
5178  if (Constraint.size() == 1) {
5179    switch (Constraint[0]) {
5180    default: break;
5181    case 'w': // An FP/SIMD vector register
5182      return C_RegisterClass;
5183    case 'I': // Constant that can be used with an ADD instruction
5184    case 'J': // Constant that can be used with a SUB instruction
5185    case 'K': // Constant that can be used with a 32-bit logical instruction
5186    case 'L': // Constant that can be used with a 64-bit logical instruction
5187    case 'M': // Constant that can be used as a 32-bit MOV immediate
5188    case 'N': // Constant that can be used as a 64-bit MOV immediate
5189    case 'Y': // Floating point constant zero
5190    case 'Z': // Integer constant zero
5191      return C_Other;
5192    case 'Q': // A memory reference with base register and no offset
5193      return C_Memory;
5194    case 'S': // A symbolic address
5195      return C_Other;
5196    }
5197  }
5198
5199  // FIXME: Ump, Utf, Usa, Ush
5200  // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
5201  //      whatever they may be
5202  // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
5203  // Usa: An absolute symbolic address
5204  // Ush: The high part (bits 32:12) of a pc-relative symbolic address
5205  assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
5206         && Constraint != "Ush" && "Unimplemented constraints");
5207
5208  return TargetLowering::getConstraintType(Constraint);
5209}
5210
5211TargetLowering::ConstraintWeight
5212AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
5213                                                const char *Constraint) const {
5214
5215  llvm_unreachable("Constraint weight unimplemented");
5216}
5217
5218void
5219AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
5220                                                    std::string &Constraint,
5221                                                    std::vector<SDValue> &Ops,
5222                                                    SelectionDAG &DAG) const {
5223  SDValue Result(0, 0);
5224
5225  // Only length 1 constraints are C_Other.
5226  if (Constraint.size() != 1) return;
5227
  // Only C_Other constraints get lowered like this. For us that means
  // constants, so return early if there's no hope the constraint can be
  // lowered.
5230
5231  switch(Constraint[0]) {
5232  default: break;
5233  case 'I': case 'J': case 'K': case 'L':
5234  case 'M': case 'N': case 'Z': {
5235    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
5236    if (!C)
5237      return;
5238
5239    uint64_t CVal = C->getZExtValue();
5240    uint32_t Bits;
5241
5242    switch (Constraint[0]) {
5243    default:
5244      // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
5245      // is a peculiarly useless SUB constraint.
5246      llvm_unreachable("Unimplemented C_Other constraint");
5247    case 'I':
5248      if (CVal <= 0xfff)
5249        break;
5250      return;
5251    case 'K':
5252      if (A64Imms::isLogicalImm(32, CVal, Bits))
5253        break;
5254      return;
5255    case 'L':
5256      if (A64Imms::isLogicalImm(64, CVal, Bits))
5257        break;
5258      return;
5259    case 'Z':
5260      if (CVal == 0)
5261        break;
5262      return;
5263    }
5264
5265    Result = DAG.getTargetConstant(CVal, Op.getValueType());
5266    break;
5267  }
5268  case 'S': {
5269    // An absolute symbolic address or label reference.
5270    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
5271      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
5272                                          GA->getValueType(0));
5273    } else if (const BlockAddressSDNode *BA
5274                 = dyn_cast<BlockAddressSDNode>(Op)) {
5275      Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
5276                                         BA->getValueType(0));
5277    } else if (const ExternalSymbolSDNode *ES
5278                 = dyn_cast<ExternalSymbolSDNode>(Op)) {
5279      Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
5280                                           ES->getValueType(0));
5281    } else
5282      return;
5283    break;
5284  }
5285  case 'Y':
5286    if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
5287      if (CFP->isExactlyValue(0.0)) {
5288        Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
5289        break;
5290      }
5291    }
5292    return;
5293  }
5294
5295  if (Result.getNode()) {
5296    Ops.push_back(Result);
5297    return;
5298  }
5299
5300  // It's an unknown constraint for us. Let generic code have a go.
5301  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
5302}
5303
5304std::pair<unsigned, const TargetRegisterClass*>
5305AArch64TargetLowering::getRegForInlineAsmConstraint(
5306                                                  const std::string &Constraint,
5307                                                  MVT VT) const {
5308  if (Constraint.size() == 1) {
5309    switch (Constraint[0]) {
5310    case 'r':
5311      if (VT.getSizeInBits() <= 32)
5312        return std::make_pair(0U, &AArch64::GPR32RegClass);
5313      else if (VT == MVT::i64)
5314        return std::make_pair(0U, &AArch64::GPR64RegClass);
5315      break;
5316    case 'w':
5317      if (VT == MVT::f16)
5318        return std::make_pair(0U, &AArch64::FPR16RegClass);
5319      else if (VT == MVT::f32)
5320        return std::make_pair(0U, &AArch64::FPR32RegClass);
5321      else if (VT.getSizeInBits() == 64)
5322        return std::make_pair(0U, &AArch64::FPR64RegClass);
5323      else if (VT.getSizeInBits() == 128)
5324        return std::make_pair(0U, &AArch64::FPR128RegClass);
5325      break;
5326    }
5327  }
5328
5329  // Use the default implementation in TargetLowering to convert the register
5330  // constraint into a member of a register class.
5331  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
5332}
5333
5334/// Represent NEON load and store intrinsics as MemIntrinsicNodes.
5335/// The associated MachineMemOperands record the alignment specified
5336/// in the intrinsic calls.
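///
/// For example, an int_arm_neon_vld3 returning { v4i32, v4i32, v4i32 } is
/// conservatively given memVT v6i64: 48 bytes, the total size of the vectors
/// loaded.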
5337bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
5338                                               const CallInst &I,
5339                                               unsigned Intrinsic) const {
5340  switch (Intrinsic) {
5341  case Intrinsic::arm_neon_vld1:
5342  case Intrinsic::arm_neon_vld2:
5343  case Intrinsic::arm_neon_vld3:
5344  case Intrinsic::arm_neon_vld4:
5345  case Intrinsic::aarch64_neon_vld1x2:
5346  case Intrinsic::aarch64_neon_vld1x3:
5347  case Intrinsic::aarch64_neon_vld1x4:
5348  case Intrinsic::arm_neon_vld2lane:
5349  case Intrinsic::arm_neon_vld3lane:
5350  case Intrinsic::arm_neon_vld4lane: {
5351    Info.opc = ISD::INTRINSIC_W_CHAIN;
5352    // Conservatively set memVT to the entire set of vectors loaded.
5353    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
5354    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
5355    Info.ptrVal = I.getArgOperand(0);
5356    Info.offset = 0;
5357    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
5358    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
5359    Info.vol = false; // volatile loads with NEON intrinsics not supported
5360    Info.readMem = true;
5361    Info.writeMem = false;
5362    return true;
5363  }
5364  case Intrinsic::arm_neon_vst1:
5365  case Intrinsic::arm_neon_vst2:
5366  case Intrinsic::arm_neon_vst3:
5367  case Intrinsic::arm_neon_vst4:
5368  case Intrinsic::aarch64_neon_vst1x2:
5369  case Intrinsic::aarch64_neon_vst1x3:
5370  case Intrinsic::aarch64_neon_vst1x4:
5371  case Intrinsic::arm_neon_vst2lane:
5372  case Intrinsic::arm_neon_vst3lane:
5373  case Intrinsic::arm_neon_vst4lane: {
5374    Info.opc = ISD::INTRINSIC_VOID;
5375    // Conservatively set memVT to the entire set of vectors stored.
5376    unsigned NumElts = 0;
5377    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
5378      Type *ArgTy = I.getArgOperand(ArgI)->getType();
5379      if (!ArgTy->isVectorTy())
5380        break;
5381      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
5382    }
5383    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
5384    Info.ptrVal = I.getArgOperand(0);
5385    Info.offset = 0;
5386    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
5387    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
5388    Info.vol = false; // volatile stores with NEON intrinsics not supported
5389    Info.readMem = false;
5390    Info.writeMem = true;
5391    return true;
5392  }
5393  default:
5394    break;
5395  }
5396
5397  return false;
5398}
5399