ARMISelLowering.cpp revision cfbb32346a7eca19d4dc00fd2c1a0248537b3497
1//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that ARM uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ARM.h"
16#include "ARMAddressingModes.h"
17#include "ARMConstantPoolValue.h"
18#include "ARMISelLowering.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMPerfectShuffle.h"
21#include "ARMRegisterInfo.h"
22#include "ARMSubtarget.h"
23#include "ARMTargetMachine.h"
24#include "ARMTargetObjectFile.h"
25#include "llvm/CallingConv.h"
26#include "llvm/Constants.h"
27#include "llvm/Function.h"
28#include "llvm/GlobalValue.h"
29#include "llvm/Instruction.h"
30#include "llvm/Intrinsics.h"
31#include "llvm/Type.h"
32#include "llvm/CodeGen/CallingConvLower.h"
33#include "llvm/CodeGen/MachineBasicBlock.h"
34#include "llvm/CodeGen/MachineFrameInfo.h"
35#include "llvm/CodeGen/MachineFunction.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineRegisterInfo.h"
38#include "llvm/CodeGen/PseudoSourceValue.h"
39#include "llvm/CodeGen/SelectionDAG.h"
40#include "llvm/MC/MCSectionMachO.h"
41#include "llvm/Target/TargetOptions.h"
42#include "llvm/ADT/VectorExtras.h"
43#include "llvm/Support/CommandLine.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MathExtras.h"
46#include "llvm/Support/raw_ostream.h"
47#include <sstream>
48using namespace llvm;
49
50static cl::opt<bool>
51EnableARMLongCalls("arm-long-calls", cl::Hidden,
52  cl::desc("Generate calls via indirect call instructions."),
53  cl::init(false));
54
55static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
56                                   CCValAssign::LocInfo &LocInfo,
57                                   ISD::ArgFlagsTy &ArgFlags,
58                                   CCState &State);
59static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
60                                    CCValAssign::LocInfo &LocInfo,
61                                    ISD::ArgFlagsTy &ArgFlags,
62                                    CCState &State);
63static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
64                                      CCValAssign::LocInfo &LocInfo,
65                                      ISD::ArgFlagsTy &ArgFlags,
66                                      CCState &State);
67static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
68                                       CCValAssign::LocInfo &LocInfo,
69                                       ISD::ArgFlagsTy &ArgFlags,
70                                       CCState &State);
71
72void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
73                                       EVT PromotedBitwiseVT) {
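  // Loads and stores of VT are promoted to PromotedLdStVT (a same-sized type
  // with native patterns), and integer bitwise operations are promoted to
  // PromotedBitwiseVT below, so a single set of patterns covers these types.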
74  if (VT != PromotedLdStVT) {
75    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
76    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
77                       PromotedLdStVT.getSimpleVT());
78
79    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
80    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
81                       PromotedLdStVT.getSimpleVT());
82  }
83
84  EVT ElemTy = VT.getVectorElementType();
85  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
86    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
87  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
88    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
89  if (ElemTy != MVT::i32) {
90    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
91    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
92    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
93    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
94  }
95  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
96  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
97  if (llvm::ModelWithRegSequence())
98    setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
99  else
100    setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
101  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
102  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
103  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
104  if (VT.isInteger()) {
105    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
106    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
107    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
108  }
109
110  // Promote all bit-wise operations.
111  if (VT.isInteger() && VT != PromotedBitwiseVT) {
112    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
113    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
114                       PromotedBitwiseVT.getSimpleVT());
115    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
116    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
117                       PromotedBitwiseVT.getSimpleVT());
118    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
119    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
120                       PromotedBitwiseVT.getSimpleVT());
121  }
122
123  // Neon does not support vector divide/remainder operations.
124  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
125  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
126  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
127  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
128  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
129  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
130}
131
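// addDRTypeForNEON and addQRTypeForNEON make a vector type legal in the
// 64-bit D registers or the 128-bit Q registers respectively, and then apply
// the common NEON operation actions via addTypeForNEON.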
132void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
133  addRegisterClass(VT, ARM::DPRRegisterClass);
134  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
135}
136
137void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
138  addRegisterClass(VT, ARM::QPRRegisterClass);
139  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
140}
141
142static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
143  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
144    return new TargetLoweringObjectFileMachO();
145
146  return new ARMElfTargetObjectFile();
147}
148
149ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
150    : TargetLowering(TM, createTLOF(TM)) {
151  Subtarget = &TM.getSubtarget<ARMSubtarget>();
152
153  if (Subtarget->isTargetDarwin()) {
154    // Uses VFP for Thumb libfuncs if available.
155    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
156      // Single-precision floating-point arithmetic.
157      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
158      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
159      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
160      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
161
162      // Double-precision floating-point arithmetic.
163      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
164      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
165      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
166      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
167
168      // Single-precision comparisons.
169      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
170      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
171      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
172      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
173      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
174      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
175      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
176      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
177
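      // The *vfp comparison routines return a nonzero value when the tested
      // relation holds, so each libcall result is compared against zero using
      // the condition codes set below (SETEQ for the ordered check, where a
      // nonzero result means unordered).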
178      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
179      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
180      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
181      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
182      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
183      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
184      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
185      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
186
187      // Double-precision comparisons.
188      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
189      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
190      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
191      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
192      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
193      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
194      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
195      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
196
197      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
198      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
199      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
200      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
201      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
202      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
203      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
204      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
205
206      // Floating-point to integer conversions.
207      // i64 conversions are done via library routines even when generating VFP
208      // instructions, so use the same ones.
209      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
210      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
211      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
212      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
213
214      // Conversions between floating types.
215      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
216      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
217
218      // Integer to floating-point conversions.
219      // i64 conversions are done via library routines even when generating VFP
220      // instructions, so use the same ones.
221      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
222      // e.g., __floatunsidf vs. __floatunssidfvfp.
223      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
224      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
225      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
226      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
227    }
228  }
229
230  // These 128-bit shift libcalls are not available on 32-bit ARM.
231  setLibcallName(RTLIB::SHL_I128, 0);
232  setLibcallName(RTLIB::SRL_I128, 0);
233  setLibcallName(RTLIB::SRA_I128, 0);
234
235  // Libcalls should use the AAPCS base standard ABI, even if hard float
236  // is in effect, as per the ARM RTABI specification, section 4.1.2.
237  if (Subtarget->isAAPCS_ABI()) {
238    for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
239      setLibcallCallingConv(static_cast<RTLIB::Libcall>(i),
240                            CallingConv::ARM_AAPCS);
241    }
242  }
243
244  if (Subtarget->isThumb1Only())
245    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
246  else
247    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
248  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
249    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
250    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
251
252    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
253  }
254
255  if (Subtarget->hasNEON()) {
256    addDRTypeForNEON(MVT::v2f32);
257    addDRTypeForNEON(MVT::v8i8);
258    addDRTypeForNEON(MVT::v4i16);
259    addDRTypeForNEON(MVT::v2i32);
260    addDRTypeForNEON(MVT::v1i64);
261
262    addQRTypeForNEON(MVT::v4f32);
263    addQRTypeForNEON(MVT::v2f64);
264    addQRTypeForNEON(MVT::v16i8);
265    addQRTypeForNEON(MVT::v8i16);
266    addQRTypeForNEON(MVT::v4i32);
267    addQRTypeForNEON(MVT::v2i64);
268
269    // Map v4i64 to QQ registers but do not make the type legal for any
270    // operations. v4i64 is only used for REG_SEQUENCE to load / store quad
271    // D registers.
272    addRegisterClass(MVT::v4i64, ARM::QQPRRegisterClass);
273
274    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
275    // neither Neon nor VFP support any arithmetic operations on it.
276    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
277    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
278    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
279    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
280    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
281    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
282    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
283    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
284    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
285    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
286    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
287    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
288    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
289    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
290    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
291    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
292    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
293    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
294    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
295    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
296    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
297    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
298    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
299    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
300
301    // Neon does not support some operations on v1i64 and v2i64 types.
302    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
303    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
304    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
305    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
306
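    // Register DAG combines for nodes that can be folded into NEON-specific
    // operations, e.g. shifts into NEON shift nodes, extends of lane extracts,
    // and select_cc into floating-point min/max.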
307    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
308    setTargetDAGCombine(ISD::SHL);
309    setTargetDAGCombine(ISD::SRL);
310    setTargetDAGCombine(ISD::SRA);
311    setTargetDAGCombine(ISD::SIGN_EXTEND);
312    setTargetDAGCombine(ISD::ZERO_EXTEND);
313    setTargetDAGCombine(ISD::ANY_EXTEND);
314    setTargetDAGCombine(ISD::SELECT_CC);
315  }
316
317  computeRegisterProperties();
318
319  // ARM does not have f32 extending load.
320  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
321
322  // ARM does not have i1 sign extending load.
323  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
324
325  // ARM supports all 4 flavors of integer indexed load / store.
326  if (!Subtarget->isThumb1Only()) {
327    for (unsigned im = (unsigned)ISD::PRE_INC;
328         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
329      setIndexedLoadAction(im,  MVT::i1,  Legal);
330      setIndexedLoadAction(im,  MVT::i8,  Legal);
331      setIndexedLoadAction(im,  MVT::i16, Legal);
332      setIndexedLoadAction(im,  MVT::i32, Legal);
333      setIndexedStoreAction(im, MVT::i1,  Legal);
334      setIndexedStoreAction(im, MVT::i8,  Legal);
335      setIndexedStoreAction(im, MVT::i16, Legal);
336      setIndexedStoreAction(im, MVT::i32, Legal);
337    }
338  }
339
340  // i64 operation support.
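  // There is no 64x64->64 multiply instruction, and Thumb1 also lacks the
  // 32x32->64 forms (umull/smull), so the wider multiply nodes are expanded.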
341  if (Subtarget->isThumb1Only()) {
342    setOperationAction(ISD::MUL,     MVT::i64, Expand);
343    setOperationAction(ISD::MULHU,   MVT::i32, Expand);
344    setOperationAction(ISD::MULHS,   MVT::i32, Expand);
345    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
346    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
347  } else {
348    setOperationAction(ISD::MUL,     MVT::i64, Expand);
349    setOperationAction(ISD::MULHU,   MVT::i32, Expand);
350    if (!Subtarget->hasV6Ops())
351      setOperationAction(ISD::MULHS, MVT::i32, Expand);
352  }
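  // There are no 64-bit (or multi-word) shift instructions; custom lower the
  // *_PARTS nodes and 64-bit srl/sra into sequences of 32-bit operations.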
353  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
354  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
355  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
356  setOperationAction(ISD::SRL,       MVT::i64, Custom);
357  setOperationAction(ISD::SRA,       MVT::i64, Custom);
358
359  // ARM does not have ROTL.
360  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
361  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
362  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
363  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
364    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
365
366  // Only ARMv6 and later have a native byte-swap instruction (REV).
367  if (!Subtarget->hasV6Ops())
368    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
369
370  // These are expanded into libcalls.
371  if (!Subtarget->hasDivide()) {
372    // v7M has a hardware divider
373    setOperationAction(ISD::SDIV,  MVT::i32, Expand);
374    setOperationAction(ISD::UDIV,  MVT::i32, Expand);
375  }
376  setOperationAction(ISD::SREM,  MVT::i32, Expand);
377  setOperationAction(ISD::UREM,  MVT::i32, Expand);
378  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
379  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
380
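  // Address-forming nodes are custom lowered so that the appropriate wrapper
  // node or PIC sequence can be generated for the current relocation model.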
381  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
382  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
383  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
384  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
385  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
386
387  setOperationAction(ISD::TRAP, MVT::Other, Legal);
388
389  // Use the default implementation.
390  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
391  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
392  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
393  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
394  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
395  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
396  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
397  // FIXME: Shouldn't need this, since no register is used, but the legalizer
398  // doesn't yet know how to avoid doing that for SjLj.
399  setExceptionSelectorRegister(ARM::R0);
400  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
401  setOperationAction(ISD::MEMBARRIER,         MVT::Other, Custom);
402
403  // If the subtarget does not have extract instructions, sign_extend_inreg
404  // needs to be expanded. Extract is available in ARM mode on v6 and up,
405  // and on most Thumb2 implementations.
406  if ((!Subtarget->isThumb() && !Subtarget->hasV6Ops())
407      || (Subtarget->isThumb2() && !Subtarget->hasT2ExtractPack())) {
408    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
409    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
410  }
411  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
412
413  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
414    // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR,
415    // iff the target supports VFP2.
416    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
417
418  // We want to custom lower some of our intrinsics.
419  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
420
421  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
422  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
423  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
424  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
425  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
426  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
427  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
428  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
429  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
430
431  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
432  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
433  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
434  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
435  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
436
437  // We don't support sin/cos/fmod/copysign/pow
438  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
439  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
440  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
441  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
442  setOperationAction(ISD::FREM,      MVT::f64, Expand);
443  setOperationAction(ISD::FREM,      MVT::f32, Expand);
444  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
445    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
446    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
447  }
448  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
449  setOperationAction(ISD::FPOW,      MVT::f32, Expand);
450
451  // Various VFP goodness
452  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
453    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
454    if (Subtarget->hasVFP2()) {
455      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
456      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
457      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
458      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
459    }
460    // Special handling for half-precision FP.
461    if (!Subtarget->hasFP16()) {
462      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
463      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
464    }
465  }
466
467  // We have target-specific dag combine patterns for the following nodes:
468  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine (target nodes are always combined)
469  setTargetDAGCombine(ISD::ADD);
470  setTargetDAGCombine(ISD::SUB);
471
472  setStackPointerRegisterToSaveRestore(ARM::SP);
473  setSchedulingPreference(SchedulingForRegPressure);
474
475  // FIXME: If-converter should use instruction latency to determine
476  // profitability rather than relying on fixed limits.
477  if (Subtarget->getCPUString() == "generic") {
478    // Generic (and overly aggressive) if-conversion limits.
479    setIfCvtBlockSizeLimit(10);
480    setIfCvtDupBlockSizeLimit(2);
481  } else if (Subtarget->hasV7Ops()) {
482    setIfCvtBlockSizeLimit(3);
483    setIfCvtDupBlockSizeLimit(1);
484  } else if (Subtarget->hasV6Ops()) {
485    setIfCvtBlockSizeLimit(2);
486    setIfCvtDupBlockSizeLimit(1);
487  } else {
488    setIfCvtBlockSizeLimit(3);
489    setIfCvtDupBlockSizeLimit(2);
490  }
491
492  maxStoresPerMemcpy = 1;   // Temporary - rewrite interface to use type.
493  // Do not enable CodePlacementOpt for now: it currently runs after the
494  // ARMConstantIslandPass and messes up branch relaxation and placement
495  // of constant islands.
496  // benefitFromCodePlacementOpt = true;
497}
498
499const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
500  switch (Opcode) {
501  default: return 0;
502  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
503  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
504  case ARMISD::CALL:          return "ARMISD::CALL";
505  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
506  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
507  case ARMISD::tCALL:         return "ARMISD::tCALL";
508  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
509  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
510  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
511  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
512  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
513  case ARMISD::CMP:           return "ARMISD::CMP";
514  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
515  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
516  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
517  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
518  case ARMISD::CMOV:          return "ARMISD::CMOV";
519  case ARMISD::CNEG:          return "ARMISD::CNEG";
520
521  case ARMISD::RBIT:          return "ARMISD::RBIT";
522
523  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
524  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
525  case ARMISD::SITOF:         return "ARMISD::SITOF";
526  case ARMISD::UITOF:         return "ARMISD::UITOF";
527
528  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
529  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
530  case ARMISD::RRX:           return "ARMISD::RRX";
531
532  case ARMISD::VMOVRRD:         return "ARMISD::VMOVRRD";
533  case ARMISD::VMOVDRR:         return "ARMISD::VMOVDRR";
534
535  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
536  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
537
538  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
539
540  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
541
542  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
543  case ARMISD::SYNCBARRIER:   return "ARMISD::SYNCBARRIER";
544
545  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
546  case ARMISD::VCGE:          return "ARMISD::VCGE";
547  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
548  case ARMISD::VCGT:          return "ARMISD::VCGT";
549  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
550  case ARMISD::VTST:          return "ARMISD::VTST";
551
552  case ARMISD::VSHL:          return "ARMISD::VSHL";
553  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
554  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
555  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
556  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
557  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
558  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
559  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
560  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
561  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
562  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
563  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
564  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
565  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
566  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
567  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
568  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
569  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
570  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
571  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
572  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
573  case ARMISD::VDUP:          return "ARMISD::VDUP";
574  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
575  case ARMISD::VEXT:          return "ARMISD::VEXT";
576  case ARMISD::VREV64:        return "ARMISD::VREV64";
577  case ARMISD::VREV32:        return "ARMISD::VREV32";
578  case ARMISD::VREV16:        return "ARMISD::VREV16";
579  case ARMISD::VZIP:          return "ARMISD::VZIP";
580  case ARMISD::VUZP:          return "ARMISD::VUZP";
581  case ARMISD::VTRN:          return "ARMISD::VTRN";
582  case ARMISD::FMAX:          return "ARMISD::FMAX";
583  case ARMISD::FMIN:          return "ARMISD::FMIN";
584  }
585}
586
587/// getFunctionAlignment - Return the Log2 alignment of this function.
588unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
589  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
590}
591
592//===----------------------------------------------------------------------===//
593// Lowering Code
594//===----------------------------------------------------------------------===//
595
596/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
597static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
598  switch (CC) {
599  default: llvm_unreachable("Unknown condition code!");
600  case ISD::SETNE:  return ARMCC::NE;
601  case ISD::SETEQ:  return ARMCC::EQ;
602  case ISD::SETGT:  return ARMCC::GT;
603  case ISD::SETGE:  return ARMCC::GE;
604  case ISD::SETLT:  return ARMCC::LT;
605  case ISD::SETLE:  return ARMCC::LE;
606  case ISD::SETUGT: return ARMCC::HI;
607  case ISD::SETUGE: return ARMCC::HS;
608  case ISD::SETULT: return ARMCC::LO;
609  case ISD::SETULE: return ARMCC::LS;
610  }
611}
612
613/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
614static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
615                        ARMCC::CondCodes &CondCode2) {
616  CondCode2 = ARMCC::AL;
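  // CondCode2 is left as AL unless the IR predicate needs two ARM conditions
  // (e.g. SETONE, SETUEQ); callers then emit a second predicated operation.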
617  switch (CC) {
618  default: llvm_unreachable("Unknown FP condition!");
619  case ISD::SETEQ:
620  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
621  case ISD::SETGT:
622  case ISD::SETOGT: CondCode = ARMCC::GT; break;
623  case ISD::SETGE:
624  case ISD::SETOGE: CondCode = ARMCC::GE; break;
625  case ISD::SETOLT: CondCode = ARMCC::MI; break;
626  case ISD::SETOLE: CondCode = ARMCC::LS; break;
627  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
628  case ISD::SETO:   CondCode = ARMCC::VC; break;
629  case ISD::SETUO:  CondCode = ARMCC::VS; break;
630  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
631  case ISD::SETUGT: CondCode = ARMCC::HI; break;
632  case ISD::SETUGE: CondCode = ARMCC::PL; break;
633  case ISD::SETLT:
634  case ISD::SETULT: CondCode = ARMCC::LT; break;
635  case ISD::SETLE:
636  case ISD::SETULE: CondCode = ARMCC::LE; break;
637  case ISD::SETNE:
638  case ISD::SETUNE: CondCode = ARMCC::NE; break;
639  }
640}
641
642//===----------------------------------------------------------------------===//
643//                      Calling Convention Implementation
644//===----------------------------------------------------------------------===//
645
646#include "ARMGenCallingConv.inc"
647
648// APCS f64 is in register pairs, possibly split to stack
649static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
650                          CCValAssign::LocInfo &LocInfo,
651                          CCState &State, bool CanFail) {
652  static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
653
654  // Try to get the first register.
655  if (unsigned Reg = State.AllocateReg(RegList, 4))
656    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
657  else {
658    // For the 2nd half of a v2f64, do not fail.
659    if (CanFail)
660      return false;
661
662    // Put the whole thing on the stack.
663    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
664                                           State.AllocateStack(8, 4),
665                                           LocVT, LocInfo));
666    return true;
667  }
668
669  // Try to get the second register.
670  if (unsigned Reg = State.AllocateReg(RegList, 4))
671    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
672  else
673    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
674                                           State.AllocateStack(4, 4),
675                                           LocVT, LocInfo));
676  return true;
677}
678
679static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
680                                   CCValAssign::LocInfo &LocInfo,
681                                   ISD::ArgFlagsTy &ArgFlags,
682                                   CCState &State) {
683  if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
684    return false;
685  if (LocVT == MVT::v2f64 &&
686      !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
687    return false;
688  return true;  // we handled it
689}
690
691// AAPCS f64 is in aligned register pairs
692static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
693                           CCValAssign::LocInfo &LocInfo,
694                           CCState &State, bool CanFail) {
695  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
696  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
697
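  // AAPCS requires the two halves of an f64 to land in an aligned pair, R0+R1
  // or R2+R3: AllocateReg picks the even register from HiRegList and reserves
  // the matching odd register from LoRegList alongside it.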
698  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
699  if (Reg == 0) {
700    // For the 2nd half of a v2f64, do not just fail.
701    if (CanFail)
702      return false;
703
704    // Put the whole thing on the stack.
705    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
706                                           State.AllocateStack(8, 8),
707                                           LocVT, LocInfo));
708    return true;
709  }
710
711  unsigned i;
712  for (i = 0; i < 2; ++i)
713    if (HiRegList[i] == Reg)
714      break;
715
716  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
717  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
718                                         LocVT, LocInfo));
719  return true;
720}
721
722static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
723                                    CCValAssign::LocInfo &LocInfo,
724                                    ISD::ArgFlagsTy &ArgFlags,
725                                    CCState &State) {
726  if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
727    return false;
728  if (LocVT == MVT::v2f64 &&
729      !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
730    return false;
731  return true;  // we handled it
732}
733
734static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
735                         CCValAssign::LocInfo &LocInfo, CCState &State) {
736  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
737  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
738
739  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
740  if (Reg == 0)
741    return false; // we didn't handle it
742
743  unsigned i;
744  for (i = 0; i < 2; ++i)
745    if (HiRegList[i] == Reg)
746      break;
747
748  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
749  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
750                                         LocVT, LocInfo));
751  return true;
752}
753
754static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
755                                      CCValAssign::LocInfo &LocInfo,
756                                      ISD::ArgFlagsTy &ArgFlags,
757                                      CCState &State) {
758  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
759    return false;
760  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
761    return false;
762  return true;  // we handled it
763}
764
765static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
766                                       CCValAssign::LocInfo &LocInfo,
767                                       ISD::ArgFlagsTy &ArgFlags,
768                                       CCState &State) {
769  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
770                                   State);
771}
772
773/// CCAssignFnForNode - Selects the correct CCAssignFn for the
774/// given CallingConvention value.
775CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
776                                                 bool Return,
777                                                 bool isVarArg) const {
778  switch (CC) {
779  default:
780    llvm_unreachable("Unsupported calling convention");
781  case CallingConv::C:
782  case CallingConv::Fast:
783    // Use target triple & subtarget features to do actual dispatch.
784    if (Subtarget->isAAPCS_ABI()) {
785      if (Subtarget->hasVFP2() &&
786          FloatABIType == FloatABI::Hard && !isVarArg)
787        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
788      else
789        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
790    } else
791        return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
792  case CallingConv::ARM_AAPCS_VFP:
793    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
794  case CallingConv::ARM_AAPCS:
795    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
796  case CallingConv::ARM_APCS:
797    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
798  }
799}
800
801/// LowerCallResult - Lower the result values of a call into the
802/// appropriate copies out of appropriate physical registers.
803SDValue
804ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
805                                   CallingConv::ID CallConv, bool isVarArg,
806                                   const SmallVectorImpl<ISD::InputArg> &Ins,
807                                   DebugLoc dl, SelectionDAG &DAG,
808                                   SmallVectorImpl<SDValue> &InVals) const {
809
810  // Assign locations to each value returned by this call.
811  SmallVector<CCValAssign, 16> RVLocs;
812  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
813                 RVLocs, *DAG.getContext());
814  CCInfo.AnalyzeCallResult(Ins,
815                           CCAssignFnForNode(CallConv, /* Return*/ true,
816                                             isVarArg));
817
818  // Copy all of the result registers out of their specified physreg.
819  for (unsigned i = 0; i != RVLocs.size(); ++i) {
820    CCValAssign VA = RVLocs[i];
821
822    SDValue Val;
823    if (VA.needsCustom()) {
824      // Handle f64 or half of a v2f64.
825      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
826                                      InFlag);
827      Chain = Lo.getValue(1);
828      InFlag = Lo.getValue(2);
829      VA = RVLocs[++i]; // skip ahead to next loc
830      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
831                                      InFlag);
832      Chain = Hi.getValue(1);
833      InFlag = Hi.getValue(2);
834      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
835
836      if (VA.getLocVT() == MVT::v2f64) {
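        // A v2f64 return value occupies four i32 locations. Rebuild it by
        // inserting each reconstructed f64 half into an undef v2f64 vector.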
837        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
838        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
839                          DAG.getConstant(0, MVT::i32));
840
841        VA = RVLocs[++i]; // skip ahead to next loc
842        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
843        Chain = Lo.getValue(1);
844        InFlag = Lo.getValue(2);
845        VA = RVLocs[++i]; // skip ahead to next loc
846        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
847        Chain = Hi.getValue(1);
848        InFlag = Hi.getValue(2);
849        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
850        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
851                          DAG.getConstant(1, MVT::i32));
852      }
853    } else {
854      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
855                               InFlag);
856      Chain = Val.getValue(1);
857      InFlag = Val.getValue(2);
858    }
859
860    switch (VA.getLocInfo()) {
861    default: llvm_unreachable("Unknown loc info!");
862    case CCValAssign::Full: break;
863    case CCValAssign::BCvt:
864      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
865      break;
866    }
867
868    InVals.push_back(Val);
869  }
870
871  return Chain;
872}
873
874/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
875/// by "Src" to address "Dst" of size "Size".  Alignment information is
876/// specified by the specific parameter attribute.  The copy will be passed as
877/// a byval function parameter.
878/// Sometimes what we are copying is the end of a larger object, the part that
879/// does not fit in registers.
880static SDValue
881CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
882                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
883                          DebugLoc dl) {
884  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
885  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
886                       /*isVolatile=*/false, /*AlwaysInline=*/false,
887                       NULL, 0, NULL, 0);
888}
889
890/// LowerMemOpCallTo - Store the argument to the stack.
891SDValue
892ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
893                                    SDValue StackPtr, SDValue Arg,
894                                    DebugLoc dl, SelectionDAG &DAG,
895                                    const CCValAssign &VA,
896                                    ISD::ArgFlagsTy Flags) const {
897  unsigned LocMemOffset = VA.getLocMemOffset();
898  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
899  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
900  if (Flags.isByVal()) {
901    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
902  }
903  return DAG.getStore(Chain, dl, Arg, PtrOff,
904                      PseudoSourceValue::getStack(), LocMemOffset,
905                      false, false, 0);
906}
907
908void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
909                                         SDValue Chain, SDValue &Arg,
910                                         RegsToPassVector &RegsToPass,
911                                         CCValAssign &VA, CCValAssign &NextVA,
912                                         SDValue &StackPtr,
913                                         SmallVector<SDValue, 8> &MemOpChains,
914                                         ISD::ArgFlagsTy Flags) const {
915
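  // Split the f64 into two i32 halves with VMOVRRD. The first half always goes
  // in VA's register; the second goes in NextVA's register if one was assigned,
  // otherwise it is stored to the stack slot described by NextVA.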
916  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
917                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
918  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
919
920  if (NextVA.isRegLoc())
921    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
922  else {
923    assert(NextVA.isMemLoc());
924    if (StackPtr.getNode() == 0)
925      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
926
927    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
928                                           dl, DAG, NextVA,
929                                           Flags));
930  }
931}
932
933/// LowerCall - Lower a call into a callseq_start <-
934/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
935/// nodes.
936SDValue
937ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
938                             CallingConv::ID CallConv, bool isVarArg,
939                             bool &isTailCall,
940                             const SmallVectorImpl<ISD::OutputArg> &Outs,
941                             const SmallVectorImpl<ISD::InputArg> &Ins,
942                             DebugLoc dl, SelectionDAG &DAG,
943                             SmallVectorImpl<SDValue> &InVals) const {
944  // ARM target does not yet support tail call optimization.
945  isTailCall = false;
946
947  // Analyze operands of the call, assigning locations to each operand.
948  SmallVector<CCValAssign, 16> ArgLocs;
949  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
950                 *DAG.getContext());
951  CCInfo.AnalyzeCallOperands(Outs,
952                             CCAssignFnForNode(CallConv, /* Return*/ false,
953                                               isVarArg));
954
955  // Get a count of how many bytes are to be pushed on the stack.
956  unsigned NumBytes = CCInfo.getNextStackOffset();
957
958  // Adjust the stack pointer for the new arguments...
959  // These operations are automatically eliminated by the prolog/epilog pass
960  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
961
962  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
963
964  RegsToPassVector RegsToPass;
965  SmallVector<SDValue, 8> MemOpChains;
966
967  // Walk the register/memloc assignments, inserting copies/loads.  In the case
968  // of tail call optimization, arguments are handled later.
969  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
970       i != e;
971       ++i, ++realArgIdx) {
972    CCValAssign &VA = ArgLocs[i];
973    SDValue Arg = Outs[realArgIdx].Val;
974    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
975
976    // Promote the value if needed.
977    switch (VA.getLocInfo()) {
978    default: llvm_unreachable("Unknown loc info!");
979    case CCValAssign::Full: break;
980    case CCValAssign::SExt:
981      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
982      break;
983    case CCValAssign::ZExt:
984      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
985      break;
986    case CCValAssign::AExt:
987      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
988      break;
989    case CCValAssign::BCvt:
990      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
991      break;
992    }
993
994    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
995    if (VA.needsCustom()) {
996      if (VA.getLocVT() == MVT::v2f64) {
997        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
998                                  DAG.getConstant(0, MVT::i32));
999        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1000                                  DAG.getConstant(1, MVT::i32));
1001
1002        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1003                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1004
1005        VA = ArgLocs[++i]; // skip ahead to next loc
1006        if (VA.isRegLoc()) {
1007          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1008                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1009        } else {
1010          assert(VA.isMemLoc());
1011
1012          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1013                                                 dl, DAG, VA, Flags));
1014        }
1015      } else {
1016        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1017                         StackPtr, MemOpChains, Flags);
1018      }
1019    } else if (VA.isRegLoc()) {
1020      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1021    } else {
1022      assert(VA.isMemLoc());
1023
1024      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1025                                             dl, DAG, VA, Flags));
1026    }
1027  }
1028
1029  if (!MemOpChains.empty())
1030    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1031                        &MemOpChains[0], MemOpChains.size());
1032
1033  // Build a sequence of copy-to-reg nodes chained together with token chain
1034  // and flag operands which copy the outgoing args into the appropriate regs.
1035  SDValue InFlag;
1036  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1037    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1038                             RegsToPass[i].second, InFlag);
1039    InFlag = Chain.getValue(1);
1040  }
1041
1042  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1043  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1044  // node so that legalize doesn't hack it.
1045  bool isDirect = false;
1046  bool isARMFunc = false;
1047  bool isLocalARMFunc = false;
1048  MachineFunction &MF = DAG.getMachineFunction();
1049  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1050
1051  if (EnableARMLongCalls) {
1052    assert (getTargetMachine().getRelocationModel() == Reloc::Static
1053            && "long-calls with non-static relocation model!");
1054    // Handle a global address or an external symbol. If it's not one of
1055    // those, the target's already in a register, so we don't need to do
1056    // anything extra.
1057    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1058      const GlobalValue *GV = G->getGlobal();
1059      // Create a constant pool entry for the callee address
1060      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1061      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
1062                                                           ARMPCLabelIndex,
1063                                                           ARMCP::CPValue, 0);
1064      // Get the address of the callee into a register
1065      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1066      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1067      Callee = DAG.getLoad(getPointerTy(), dl,
1068                           DAG.getEntryNode(), CPAddr,
1069                           PseudoSourceValue::getConstantPool(), 0,
1070                           false, false, 0);
1071    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
1072      const char *Sym = S->getSymbol();
1073
1074      // Create a constant pool entry for the callee address
1075      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1076      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
1077                                                       Sym, ARMPCLabelIndex, 0);
1078      // Get the address of the callee into a register
1079      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1080      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1081      Callee = DAG.getLoad(getPointerTy(), dl,
1082                           DAG.getEntryNode(), CPAddr,
1083                           PseudoSourceValue::getConstantPool(), 0,
1084                           false, false, 0);
1085    }
1086  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1087    const GlobalValue *GV = G->getGlobal();
1088    isDirect = true;
1089    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
1090    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
1091                   getTargetMachine().getRelocationModel() != Reloc::Static;
1092    isARMFunc = !Subtarget->isThumb() || isStub;
1093    // ARM call to a local ARM function is predicable.
1094    isLocalARMFunc = !Subtarget->isThumb() && !isExt;
1095    // tBX takes a register source operand.
1096    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
1097      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1098      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
1099                                                           ARMPCLabelIndex,
1100                                                           ARMCP::CPValue, 4);
1101      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1102      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1103      Callee = DAG.getLoad(getPointerTy(), dl,
1104                           DAG.getEntryNode(), CPAddr,
1105                           PseudoSourceValue::getConstantPool(), 0,
1106                           false, false, 0);
1107      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1108      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
1109                           getPointerTy(), Callee, PICLabel);
1110    } else
1111      Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
1112  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1113    isDirect = true;
1114    bool isStub = Subtarget->isTargetDarwin() &&
1115                  getTargetMachine().getRelocationModel() != Reloc::Static;
1116    isARMFunc = !Subtarget->isThumb() || isStub;
1117    // tBX takes a register source operand.
1118    const char *Sym = S->getSymbol();
1119    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
1120      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1121      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
1122                                                       Sym, ARMPCLabelIndex, 4);
1123      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1124      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1125      Callee = DAG.getLoad(getPointerTy(), dl,
1126                           DAG.getEntryNode(), CPAddr,
1127                           PseudoSourceValue::getConstantPool(), 0,
1128                           false, false, 0);
1129      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1130      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
1131                           getPointerTy(), Callee, PICLabel);
1132    } else
1133      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
1134  }
1135
1136  // FIXME: handle tail calls differently.
1137  unsigned CallOpc;
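  // Select the call opcode. Targets without BLX (pre-v5T) cannot use a single
  // branch-and-link for indirect or ARM/Thumb interworking calls, so those use
  // CALL_NOLINK and the return address is handled separately.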
1138  if (Subtarget->isThumb()) {
1139    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
1140      CallOpc = ARMISD::CALL_NOLINK;
1141    else
1142      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
1143  } else {
1144    CallOpc = (isDirect || Subtarget->hasV5TOps())
1145      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
1146      : ARMISD::CALL_NOLINK;
1147  }
1148  if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
1149    // Implicit def LR - LR must not be allocated as GPR:$dst of CALL_NOLINK.
1150    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
1151    InFlag = Chain.getValue(1);
1152  }
1153
1154  std::vector<SDValue> Ops;
1155  Ops.push_back(Chain);
1156  Ops.push_back(Callee);
1157
1158  // Add argument registers to the end of the list so that they are known live
1159  // into the call.
1160  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1161    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1162                                  RegsToPass[i].second.getValueType()));
1163
1164  if (InFlag.getNode())
1165    Ops.push_back(InFlag);
1166  // Returns a chain and a flag for retval copy to use.
1167  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
1168                      &Ops[0], Ops.size());
1169  InFlag = Chain.getValue(1);
1170
1171  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1172                             DAG.getIntPtrConstant(0, true), InFlag);
1173  if (!Ins.empty())
1174    InFlag = Chain.getValue(1);
1175
1176  // Handle result values, copying them out of physregs into vregs that we
1177  // return.
1178  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
1179                         dl, DAG, InVals);
1180}
1181
1182SDValue
1183ARMTargetLowering::LowerReturn(SDValue Chain,
1184                               CallingConv::ID CallConv, bool isVarArg,
1185                               const SmallVectorImpl<ISD::OutputArg> &Outs,
1186                               DebugLoc dl, SelectionDAG &DAG) const {
1187
1188  // CCValAssign - represent the assignment of the return value to a location.
1189  SmallVector<CCValAssign, 16> RVLocs;
1190
1191  // CCState - Info about the registers and stack slots.
1192  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
1193                 *DAG.getContext());
1194
1195  // Analyze outgoing return values.
1196  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
1197                                               isVarArg));
1198
1199  // If this is the first return lowered for this function, add
1200  // the regs to the liveout set for the function.
1201  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1202    for (unsigned i = 0; i != RVLocs.size(); ++i)
1203      if (RVLocs[i].isRegLoc())
1204        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1205  }
1206
1207  SDValue Flag;
1208
1209  // Copy the result values into the output registers.
1210  for (unsigned i = 0, realRVLocIdx = 0;
1211       i != RVLocs.size();
1212       ++i, ++realRVLocIdx) {
1213    CCValAssign &VA = RVLocs[i];
1214    assert(VA.isRegLoc() && "Can only return in registers!");
1215
1216    SDValue Arg = Outs[realRVLocIdx].Val;
1217
1218    switch (VA.getLocInfo()) {
1219    default: llvm_unreachable("Unknown loc info!");
1220    case CCValAssign::Full: break;
1221    case CCValAssign::BCvt:
1222      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
1223      break;
1224    }
1225
1226    if (VA.needsCustom()) {
1227      if (VA.getLocVT() == MVT::v2f64) {
1228        // Extract the first half and return it in two registers.
1229        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1230                                   DAG.getConstant(0, MVT::i32));
1231        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
1232                                       DAG.getVTList(MVT::i32, MVT::i32), Half);
1233
1234        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
1235        Flag = Chain.getValue(1);
1236        VA = RVLocs[++i]; // skip ahead to next loc
1237        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1238                                 HalfGPRs.getValue(1), Flag);
1239        Flag = Chain.getValue(1);
1240        VA = RVLocs[++i]; // skip ahead to next loc
1241
1242        // Extract the 2nd half and fall through to handle it as an f64 value.
1243        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1244                          DAG.getConstant(1, MVT::i32));
1245      }
1246      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
1247      // available.
1248      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1249                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
1250      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
1251      Flag = Chain.getValue(1);
1252      VA = RVLocs[++i]; // skip ahead to next loc
1253      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
1254                               Flag);
1255    } else
1256      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1257
1258    // Guarantee that all emitted copies are chained together through the
1259    // flag value, so they stay together and nothing is scheduled between them.
1260    Flag = Chain.getValue(1);
1261  }
1262
1263  SDValue result;
1264  if (Flag.getNode())
1265    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1266  else // Return Void
1267    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
1268
1269  return result;
1270}
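// For example, an f64 return value handled by the needsCustom() path above is
// conceptually lowered to
//   (lo, hi) = ARMISD::VMOVRRD f64          ; move the double into two GPRs
//   CopyToReg <reg0>, lo ; CopyToReg <reg1>, hi ; ARMISD::RET_FLAG
// where the actual registers come from RVLocs (typically R0/R1 under the ARM
// calling conventions; treat those names as an assumption). A v2f64 value
// uses two such pairs, one per f64 half.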
1271
1272// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
1273// their target counterparts wrapped in the ARMISD::Wrapper node. Suppose N is
1274// one of the above-mentioned nodes. It has to be wrapped because otherwise
1275// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
1276// be used to form addressing modes. These wrapped nodes will be selected
1277// into MOVi.
1278static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
1279  EVT PtrVT = Op.getValueType();
1280  // FIXME there is no actual debug info here
1281  DebugLoc dl = Op.getDebugLoc();
1282  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1283  SDValue Res;
1284  if (CP->isMachineConstantPoolEntry())
1285    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
1286                                    CP->getAlignment());
1287  else
1288    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
1289                                    CP->getAlignment());
1290  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
1291}
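// Sketch of the resulting DAG for a constant-pool reference (illustrative
// shape only):
//   t1: i32 = TargetConstantPool <constant> [alignment]
//   t2: i32 = ARMISD::Wrapper t1
// The selector then matches t2 (e.g. as the address of a literal-pool load)
// instead of seeing a bare TargetConstantPool node.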
1292
1293SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
1294                                             SelectionDAG &DAG) const {
1295  MachineFunction &MF = DAG.getMachineFunction();
1296  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1297  unsigned ARMPCLabelIndex = 0;
1298  DebugLoc DL = Op.getDebugLoc();
1299  EVT PtrVT = getPointerTy();
1300  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1301  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1302  SDValue CPAddr;
1303  if (RelocM == Reloc::Static) {
1304    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
1305  } else {
1306    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
1307    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1308    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
1309                                                         ARMCP::CPBlockAddress,
1310                                                         PCAdj);
1311    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1312  }
1313  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
1314  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
1315                               PseudoSourceValue::getConstantPool(), 0,
1316                               false, false, 0);
1317  if (RelocM == Reloc::Static)
1318    return Result;
1319  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1320  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
1321}
1322
1323// Lower ISD::GlobalTLSAddress using the "general dynamic" model
1324SDValue
1325ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1326                                                 SelectionDAG &DAG) const {
1327  DebugLoc dl = GA->getDebugLoc();
1328  EVT PtrVT = getPointerTy();
1329  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1330  MachineFunction &MF = DAG.getMachineFunction();
1331  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1332  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1333  ARMConstantPoolValue *CPV =
1334    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
1335                             ARMCP::CPValue, PCAdj, "tlsgd", true);
1336  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1337  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
1338  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
1339                         PseudoSourceValue::getConstantPool(), 0,
1340                         false, false, 0);
1341  SDValue Chain = Argument.getValue(1);
1342
1343  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1344  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
1345
1346  // call __tls_get_addr.
1347  ArgListTy Args;
1348  ArgListEntry Entry;
1349  Entry.Node = Argument;
1350  Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
1351  Args.push_back(Entry);
1352  // FIXME: is there useful debug info available here?
1353  std::pair<SDValue, SDValue> CallResult =
1354    LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
1355                false, false, false, false,
1356                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
1357                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
1358  return CallResult.first;
1359}
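// The general-dynamic sequence built above is, roughly:
//   1. load the "tlsgd"-relocated constant-pool entry for the variable
//   2. ARMISD::PIC_ADD it to the current PC (pc-relative materialization)
//   3. call __tls_get_addr with that address as the single i32 argument
// The call's result (CallResult.first) is the address of the thread-local
// variable for the current thread.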
1360
1361// Lower ISD::GlobalTLSAddress using the "initial exec" or
1362// "local exec" model.
1363SDValue
1364ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
1365                                        SelectionDAG &DAG) const {
1366  const GlobalValue *GV = GA->getGlobal();
1367  DebugLoc dl = GA->getDebugLoc();
1368  SDValue Offset;
1369  SDValue Chain = DAG.getEntryNode();
1370  EVT PtrVT = getPointerTy();
1371  // Get the Thread Pointer
1372  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
1373
1374  if (GV->isDeclaration()) {
1375    MachineFunction &MF = DAG.getMachineFunction();
1376    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1377    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1378    // Initial exec model.
1379    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1380    ARMConstantPoolValue *CPV =
1381      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
1382                               ARMCP::CPValue, PCAdj, "gottpoff", true);
1383    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1384    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
1385    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1386                         PseudoSourceValue::getConstantPool(), 0,
1387                         false, false, 0);
1388    Chain = Offset.getValue(1);
1389
1390    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1391    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
1392
1393    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1394                         PseudoSourceValue::getConstantPool(), 0,
1395                         false, false, 0);
1396  } else {
1397    // local exec model
1398    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff");
1399    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1400    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
1401    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1402                         PseudoSourceValue::getConstantPool(), 0,
1403                         false, false, 0);
1404  }
1405
1406  // The address of the thread local variable is the add of the thread
1407  // pointer with the offset of the variable.
1408  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
1409}
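// Roughly, the two sequences built above are:
//   initial exec: load the "gottpoff" literal, PIC-add the PC to form the
//                 GOT-style slot address, load the TP offset from that slot,
//                 then add the thread pointer;
//   local exec:   load the "tpoff" offset directly from the constant pool
//                 and add the thread pointer.
// The declaration (initial-exec) case thus needs one extra indirection that
// the local-exec case avoids.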
1410
1411SDValue
1412ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
1413  // TODO: implement the "local dynamic" model
1414  assert(Subtarget->isTargetELF() &&
1415         "TLS not implemented for non-ELF targets");
1416  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1417  // If the relocation model is PIC, use the "General Dynamic" TLS model;
1418  // otherwise use the "Initial Exec" or "Local Exec" TLS model.
1419  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
1420    return LowerToTLSGeneralDynamicModel(GA, DAG);
1421  else
1422    return LowerToTLSExecModels(GA, DAG);
1423}
1424
1425SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
1426                                                 SelectionDAG &DAG) const {
1427  EVT PtrVT = getPointerTy();
1428  DebugLoc dl = Op.getDebugLoc();
1429  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1430  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1431  if (RelocM == Reloc::PIC_) {
1432    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
1433    ARMConstantPoolValue *CPV =
1434      new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT");
1435    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1436    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1437    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
1438                                 CPAddr,
1439                                 PseudoSourceValue::getConstantPool(), 0,
1440                                 false, false, 0);
1441    SDValue Chain = Result.getValue(1);
1442    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1443    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
1444    if (!UseGOTOFF)
1445      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
1446                           PseudoSourceValue::getGOT(), 0,
1447                           false, false, 0);
1448    return Result;
1449  } else {
1450    // If we have T2 ops, we can materialize the address directly via a
1451    // movt/movw pair. This is always cheaper than a constant-pool load.
1452    if (Subtarget->useMovt()) {
1453      return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
1454                         DAG.getTargetGlobalAddress(GV, PtrVT));
1455    } else {
1456      SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
1457      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1458      return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1459                         PseudoSourceValue::getConstantPool(), 0,
1460                         false, false, 0);
1461    }
1462  }
1463}
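// In PIC mode the sequence above is, in outline:
//   off  = load of the GOTOFF/GOT-relative value from the constant pool
//   addr = off + GLOBAL_OFFSET_TABLE base
//   addr = load addr                 ; only for the GOT (non-GOTOFF) case
// Locally visible globals use GOTOFF and skip the final load; all others go
// through their GOT slot.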
1464
1465SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
1466                                                    SelectionDAG &DAG) const {
1467  MachineFunction &MF = DAG.getMachineFunction();
1468  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1469  unsigned ARMPCLabelIndex = 0;
1470  EVT PtrVT = getPointerTy();
1471  DebugLoc dl = Op.getDebugLoc();
1472  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1473  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1474  SDValue CPAddr;
1475  if (RelocM == Reloc::Static)
1476    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
1477  else {
1478    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1479    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
1480    ARMConstantPoolValue *CPV =
1481      new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj);
1482    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1483  }
1484  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1485
1486  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1487                               PseudoSourceValue::getConstantPool(), 0,
1488                               false, false, 0);
1489  SDValue Chain = Result.getValue(1);
1490
1491  if (RelocM == Reloc::PIC_) {
1492    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1493    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1494  }
1495
1496  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
1497    Result = DAG.getLoad(PtrVT, dl, Chain, Result,
1498                         PseudoSourceValue::getGOT(), 0,
1499                         false, false, 0);
1500
1501  return Result;
1502}
1503
1504SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
1505                                                    SelectionDAG &DAG) const {
1506  assert(Subtarget->isTargetELF() &&
1507         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
1508  MachineFunction &MF = DAG.getMachineFunction();
1509  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1510  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1511  EVT PtrVT = getPointerTy();
1512  DebugLoc dl = Op.getDebugLoc();
1513  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
1514  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
1515                                                       "_GLOBAL_OFFSET_TABLE_",
1516                                                       ARMPCLabelIndex, PCAdj);
1517  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1518  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1519  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1520                               PseudoSourceValue::getConstantPool(), 0,
1521                               false, false, 0);
1522  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1523  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1524}
1525
1526SDValue
1527ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
1528                                           const ARMSubtarget *Subtarget)
1529                                             const {
1530  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1531  DebugLoc dl = Op.getDebugLoc();
1532  switch (IntNo) {
1533  default: return SDValue();    // Don't custom lower most intrinsics.
1534  case Intrinsic::arm_thread_pointer: {
1535    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1536    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
1537  }
1538  case Intrinsic::eh_sjlj_lsda: {
1539    MachineFunction &MF = DAG.getMachineFunction();
1540    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1541    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1542    EVT PtrVT = getPointerTy();
1543    DebugLoc dl = Op.getDebugLoc();
1544    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1545    SDValue CPAddr;
1546    unsigned PCAdj = (RelocM != Reloc::PIC_)
1547      ? 0 : (Subtarget->isThumb() ? 4 : 8);
1548    ARMConstantPoolValue *CPV =
1549      new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex,
1550                               ARMCP::CPLSDA, PCAdj);
1551    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1552    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1553    SDValue Result =
1554      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1555                  PseudoSourceValue::getConstantPool(), 0,
1556                  false, false, 0);
1557    SDValue Chain = Result.getValue(1);
1558
1559    if (RelocM == Reloc::PIC_) {
1560      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1561      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1562    }
1563    return Result;
1564  }
1565  case Intrinsic::eh_sjlj_setjmp:
1566    SDValue Val = Subtarget->isThumb() ?
1567      DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::SP, MVT::i32) :
1568      DAG.getConstant(0, MVT::i32);
1569    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1),
1570                       Val);
1571  }
1572}
1573
1574static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
1575                          const ARMSubtarget *Subtarget) {
1576  DebugLoc dl = Op.getDebugLoc();
1577  SDValue Op5 = Op.getOperand(5);
1578  SDValue Res;
1579  unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
1580  if (isDeviceBarrier) {
1581    if (Subtarget->hasV7Ops())
1582      Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0));
1583    else
1584      Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0),
1585                        DAG.getConstant(0, MVT::i32));
1586  } else {
1587    if (Subtarget->hasV7Ops())
1588      Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
1589    else
1590      Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
1591                        DAG.getConstant(0, MVT::i32));
1592  }
1593  return Res;
1594}
1595
1596static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
1597  MachineFunction &MF = DAG.getMachineFunction();
1598  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
1599
1600  // vastart just stores the address of the VarArgsFrameIndex slot into the
1601  // memory location argument.
1602  DebugLoc dl = Op.getDebugLoc();
1603  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1604  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1605  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1606  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
1607                      false, false, 0);
1608}
1609
1610SDValue
1611ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1612                                           SelectionDAG &DAG) const {
1613  SDNode *Node = Op.getNode();
1614  DebugLoc dl = Node->getDebugLoc();
1615  EVT VT = Node->getValueType(0);
1616  SDValue Chain = Op.getOperand(0);
1617  SDValue Size  = Op.getOperand(1);
1618  SDValue Align = Op.getOperand(2);
1619
1620  // Chain the dynamic stack allocation so that it doesn't modify the stack
1621  // pointer when other instructions are using the stack.
1622  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
1623
1624  unsigned AlignVal = cast<ConstantSDNode>(Align)->getZExtValue();
1625  unsigned StackAlign = getTargetMachine().getFrameInfo()->getStackAlignment();
1626  if (AlignVal > StackAlign)
1627    // Do this now since the selection pass cannot introduce new
1628    // target-independent nodes.
1629    Align = DAG.getConstant(-(uint64_t)AlignVal, VT);
1630
1631  // In Thumb1 mode, there isn't a "sub r, sp, r" instruction, so we will end
1632  // up using an "add r, sp, r" instead. Negate the size now so we don't have
1633  // to resort to an even more horrible hack later.
1634  MachineFunction &MF = DAG.getMachineFunction();
1635  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1636  if (AFI->isThumb1OnlyFunction()) {
1637    bool Negate = true;
1638    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
1639    if (C) {
1640      uint32_t Val = C->getZExtValue();
1641      if (Val <= 508 && ((Val & 3) == 0))
1642        Negate = false;
1643    }
1644    if (Negate)
1645      Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
1646  }
1647
1648  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
1649  SDValue Ops1[] = { Chain, Size, Align };
1650  SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, Ops1, 3);
1651  Chain = Res.getValue(1);
1652  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
1653                             DAG.getIntPtrConstant(0, true), SDValue());
1654  SDValue Ops2[] = { Res, Chain };
1655  return DAG.getMergeValues(Ops2, 2, dl);
1656}
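// Example of the Thumb1 special case above: a constant allocation size such
// as 64 (<= 508 and word-aligned) is left positive, since it fits the
// immediate form of a stack adjustment; any other size is negated here so
// the DYN_ALLOC expansion can use "add r, sp, r" instead of the unavailable
// "sub r, sp, r".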
1657
1658SDValue
1659ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
1660                                        SDValue &Root, SelectionDAG &DAG,
1661                                        DebugLoc dl) const {
1662  MachineFunction &MF = DAG.getMachineFunction();
1663  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1664
1665  TargetRegisterClass *RC;
1666  if (AFI->isThumb1OnlyFunction())
1667    RC = ARM::tGPRRegisterClass;
1668  else
1669    RC = ARM::GPRRegisterClass;
1670
1671  // Transform the arguments stored in physical registers into virtual ones.
1672  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1673  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
1674
1675  SDValue ArgValue2;
1676  if (NextVA.isMemLoc()) {
1677    MachineFrameInfo *MFI = MF.getFrameInfo();
1678    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true, false);
1679
1680    // Create load node to retrieve arguments from the stack.
1681    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1682    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
1683                            PseudoSourceValue::getFixedStack(FI), 0,
1684                            false, false, 0);
1685  } else {
1686    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
1687    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
1688  }
1689
1690  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
1691}
1692
1693SDValue
1694ARMTargetLowering::LowerFormalArguments(SDValue Chain,
1695                                        CallingConv::ID CallConv, bool isVarArg,
1696                                        const SmallVectorImpl<ISD::InputArg>
1697                                          &Ins,
1698                                        DebugLoc dl, SelectionDAG &DAG,
1699                                        SmallVectorImpl<SDValue> &InVals)
1700                                          const {
1701
1702  MachineFunction &MF = DAG.getMachineFunction();
1703  MachineFrameInfo *MFI = MF.getFrameInfo();
1704
1705  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1706
1707  // Assign locations to all of the incoming arguments.
1708  SmallVector<CCValAssign, 16> ArgLocs;
1709  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
1710                 *DAG.getContext());
1711  CCInfo.AnalyzeFormalArguments(Ins,
1712                                CCAssignFnForNode(CallConv, /* Return*/ false,
1713                                                  isVarArg));
1714
1715  SmallVector<SDValue, 16> ArgValues;
1716
1717  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1718    CCValAssign &VA = ArgLocs[i];
1719
1720    // Arguments stored in registers.
1721    if (VA.isRegLoc()) {
1722      EVT RegVT = VA.getLocVT();
1723
1724      SDValue ArgValue;
1725      if (VA.needsCustom()) {
1726        // f64 and vector types are split up into multiple registers or
1727        // combinations of registers and stack slots.
1728        if (VA.getLocVT() == MVT::v2f64) {
1729          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
1730                                                   Chain, DAG, dl);
1731          VA = ArgLocs[++i]; // skip ahead to next loc
1732          SDValue ArgValue2;
1733          if (VA.isMemLoc()) {
1734            int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(),
1735                                            true, false);
1736            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1737            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
1738                                    PseudoSourceValue::getFixedStack(FI), 0,
1739                                    false, false, 0);
1740          } else {
1741            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
1742                                             Chain, DAG, dl);
1743          }
1744          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
1745          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
1746                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
1747          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
1748                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
1749        } else
1750          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
1751
1752      } else {
1753        TargetRegisterClass *RC;
1754
1755        if (RegVT == MVT::f32)
1756          RC = ARM::SPRRegisterClass;
1757        else if (RegVT == MVT::f64)
1758          RC = ARM::DPRRegisterClass;
1759        else if (RegVT == MVT::v2f64)
1760          RC = ARM::QPRRegisterClass;
1761        else if (RegVT == MVT::i32)
1762          RC = (AFI->isThumb1OnlyFunction() ?
1763                ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
1764        else
1765          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
1766
1767        // Transform the arguments in physical registers into virtual ones.
1768        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1769        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1770      }
1771
1772      // If this is an 8 or 16-bit value, it is really passed promoted
1773      // to 32 bits.  Insert an assert[sz]ext to capture this, then
1774      // truncate to the right size.
1775      switch (VA.getLocInfo()) {
1776      default: llvm_unreachable("Unknown loc info!");
1777      case CCValAssign::Full: break;
1778      case CCValAssign::BCvt:
1779        ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
1780        break;
1781      case CCValAssign::SExt:
1782        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1783                               DAG.getValueType(VA.getValVT()));
1784        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1785        break;
1786      case CCValAssign::ZExt:
1787        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1788                               DAG.getValueType(VA.getValVT()));
1789        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1790        break;
1791      }
1792
1793      InVals.push_back(ArgValue);
1794
1795    } else { // VA.isRegLoc()
1796
1797      // sanity check
1798      assert(VA.isMemLoc());
1799      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
1800
1801      unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
1802      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
1803                                      true, false);
1804
1805      // Create load nodes to retrieve arguments from the stack.
1806      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1807      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
1808                                   PseudoSourceValue::getFixedStack(FI), 0,
1809                                   false, false, 0));
1810    }
1811  }
1812
1813  // varargs
1814  if (isVarArg) {
1815    static const unsigned GPRArgRegs[] = {
1816      ARM::R0, ARM::R1, ARM::R2, ARM::R3
1817    };
1818
1819    unsigned NumGPRs = CCInfo.getFirstUnallocated
1820      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
1821
1822    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
1823    unsigned VARegSize = (4 - NumGPRs) * 4;
1824    unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
1825    unsigned ArgOffset = CCInfo.getNextStackOffset();
1826    if (VARegSaveSize) {
1827      // If this function is vararg, store any remaining integer argument regs
1828      // to their spots on the stack so that they may be loaded by dereferencing
1829      // the result of va_next.
1830      AFI->setVarArgsRegSaveSize(VARegSaveSize);
1831      AFI->setVarArgsFrameIndex(
1832        MFI->CreateFixedObject(VARegSaveSize,
1833                               ArgOffset + VARegSaveSize - VARegSize,
1834                               true, false));
1835      SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
1836                                      getPointerTy());
1837
1838      SmallVector<SDValue, 4> MemOps;
1839      for (; NumGPRs < 4; ++NumGPRs) {
1840        TargetRegisterClass *RC;
1841        if (AFI->isThumb1OnlyFunction())
1842          RC = ARM::tGPRRegisterClass;
1843        else
1844          RC = ARM::GPRRegisterClass;
1845
1846        unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
1847        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1848        SDValue Store =
1849          DAG.getStore(Val.getValue(1), dl, Val, FIN,
1850                       PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()), 0,
1851                       false, false, 0);
1852        MemOps.push_back(Store);
1853        FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
1854                          DAG.getConstant(4, getPointerTy()));
1855      }
1856      if (!MemOps.empty())
1857        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1858                            &MemOps[0], MemOps.size());
1859    } else
1860      // This will point to the next argument passed via stack.
1861      AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset,
1862                                                       true, false));
1863  }
1864
1865  return Chain;
1866}
1867
1868/// isFloatingPointZero - Return true if this is +0.0.
1869static bool isFloatingPointZero(SDValue Op) {
1870  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1871    return CFP->getValueAPF().isPosZero();
1872  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1873    // Maybe this has already been legalized into the constant pool?
1874    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
1875      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
1876      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
1877        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1878          return CFP->getValueAPF().isPosZero();
1879    }
1880  }
1881  return false;
1882}
1883
1884/// Returns an appropriate ARM CMP (cmp) node and the corresponding condition
1885/// code for the given operands.
1886SDValue
1887ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
1888                             SDValue &ARMCC, SelectionDAG &DAG,
1889                             DebugLoc dl) const {
1890  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
1891    unsigned C = RHSC->getZExtValue();
1892    if (!isLegalICmpImmediate(C)) {
1893      // Constant does not fit, try adjusting it by one?
1894      switch (CC) {
1895      default: break;
1896      case ISD::SETLT:
1897      case ISD::SETGE:
1898        if (isLegalICmpImmediate(C-1)) {
1899          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
1900          RHS = DAG.getConstant(C-1, MVT::i32);
1901        }
1902        break;
1903      case ISD::SETULT:
1904      case ISD::SETUGE:
1905        if (C > 0 && isLegalICmpImmediate(C-1)) {
1906          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
1907          RHS = DAG.getConstant(C-1, MVT::i32);
1908        }
1909        break;
1910      case ISD::SETLE:
1911      case ISD::SETGT:
1912        if (isLegalICmpImmediate(C+1)) {
1913          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
1914          RHS = DAG.getConstant(C+1, MVT::i32);
1915        }
1916        break;
1917      case ISD::SETULE:
1918      case ISD::SETUGT:
1919        if (C < 0xffffffff && isLegalICmpImmediate(C+1)) {
1920          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1921          RHS = DAG.getConstant(C+1, MVT::i32);
1922        }
1923        break;
1924      }
1925    }
1926  }
1927
1928  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
1929  ARMISD::NodeType CompareType;
1930  switch (CondCode) {
1931  default:
1932    CompareType = ARMISD::CMP;
1933    break;
1934  case ARMCC::EQ:
1935  case ARMCC::NE:
1936    // Uses only Z Flag
1937    CompareType = ARMISD::CMPZ;
1938    break;
1939  }
1940  ARMCC = DAG.getConstant(CondCode, MVT::i32);
1941  return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
1942}
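// Example of the adjustment above: if C does not encode as an ARM immediate
// but C-1 does, "x < C" (SETLT) is rewritten as "x <= C-1" (SETLE), and
// likewise "x >= C" becomes "x > C-1". The unsigned cases additionally guard
// against wrapping (C > 0 for SETULT/SETUGE, C < 0xffffffff for
// SETULE/SETUGT) so the rewritten constant stays meaningful.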
1943
1944/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
1945static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
1946                         DebugLoc dl) {
1947  SDValue Cmp;
1948  if (!isFloatingPointZero(RHS))
1949    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
1950  else
1951    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
1952  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
1953}
1954
1955SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
1956  EVT VT = Op.getValueType();
1957  SDValue LHS = Op.getOperand(0);
1958  SDValue RHS = Op.getOperand(1);
1959  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1960  SDValue TrueVal = Op.getOperand(2);
1961  SDValue FalseVal = Op.getOperand(3);
1962  DebugLoc dl = Op.getDebugLoc();
1963
1964  if (LHS.getValueType() == MVT::i32) {
1965    SDValue ARMCC;
1966    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1967    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
1968    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
1969  }
1970
1971  ARMCC::CondCodes CondCode, CondCode2;
1972  FPCCToARMCC(CC, CondCode, CondCode2);
1973
1974  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
1975  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1976  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
1977  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
1978                                 ARMCC, CCR, Cmp);
1979  if (CondCode2 != ARMCC::AL) {
1980    SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
1981    // FIXME: Needs another CMP because flag can have but one use.
1982    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
1983    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
1984                         Result, TrueVal, ARMCC2, CCR, Cmp2);
1985  }
1986  return Result;
1987}
1988
1989SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1990  SDValue  Chain = Op.getOperand(0);
1991  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1992  SDValue    LHS = Op.getOperand(2);
1993  SDValue    RHS = Op.getOperand(3);
1994  SDValue   Dest = Op.getOperand(4);
1995  DebugLoc dl = Op.getDebugLoc();
1996
1997  if (LHS.getValueType() == MVT::i32) {
1998    SDValue ARMCC;
1999    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2000    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
2001    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2002                       Chain, Dest, ARMCC, CCR,Cmp);
2003  }
2004
2005  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
2006  ARMCC::CondCodes CondCode, CondCode2;
2007  FPCCToARMCC(CC, CondCode, CondCode2);
2008
2009  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2010  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
2011  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2012  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
2013  SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
2014  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2015  if (CondCode2 != ARMCC::AL) {
2016    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
2017    SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
2018    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2019  }
2020  return Res;
2021}
2022
2023SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
2024  SDValue Chain = Op.getOperand(0);
2025  SDValue Table = Op.getOperand(1);
2026  SDValue Index = Op.getOperand(2);
2027  DebugLoc dl = Op.getDebugLoc();
2028
2029  EVT PTy = getPointerTy();
2030  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
2031  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
2032  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
2033  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
2034  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
2035  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
2036  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
2037  if (Subtarget->isThumb2()) {
2038    // Thumb2 uses a two-level jump. That is, it jumps into the jump table,
2039    // which then does another jump to the actual destination. This also makes
2040    // it easier to translate it to TBB / TBH later.
2041    // FIXME: This might not work if the function is extremely large.
2042    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
2043                       Addr, Op.getOperand(2), JTI, UId);
2044  }
2045  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
2046    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
2047                       PseudoSourceValue::getJumpTable(), 0,
2048                       false, false, 0);
2049    Chain = Addr.getValue(1);
2050    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
2051    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
2052  } else {
2053    Addr = DAG.getLoad(PTy, dl, Chain, Addr,
2054                       PseudoSourceValue::getJumpTable(), 0, false, false, 0);
2055    Chain = Addr.getValue(1);
2056    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
2057  }
2058}
2059
2060static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
2061  DebugLoc dl = Op.getDebugLoc();
2062  unsigned Opc;
2063
2064  switch (Op.getOpcode()) {
2065  default:
2066    assert(0 && "Invalid opcode!");
2067  case ISD::FP_TO_SINT:
2068    Opc = ARMISD::FTOSI;
2069    break;
2070  case ISD::FP_TO_UINT:
2071    Opc = ARMISD::FTOUI;
2072    break;
2073  }
2074  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
2075  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
2076}
2077
2078static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
2079  EVT VT = Op.getValueType();
2080  DebugLoc dl = Op.getDebugLoc();
2081  unsigned Opc;
2082
2083  switch (Op.getOpcode()) {
2084  default:
2085    assert(0 && "Invalid opcode!");
2086  case ISD::SINT_TO_FP:
2087    Opc = ARMISD::SITOF;
2088    break;
2089  case ISD::UINT_TO_FP:
2090    Opc = ARMISD::UITOF;
2091    break;
2092  }
2093
2094  Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
2095  return DAG.getNode(Opc, dl, VT, Op);
2096}
2097
2098static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
2099  // Implement fcopysign with a fabs and a conditional fneg.
2100  SDValue Tmp0 = Op.getOperand(0);
2101  SDValue Tmp1 = Op.getOperand(1);
2102  DebugLoc dl = Op.getDebugLoc();
2103  EVT VT = Op.getValueType();
2104  EVT SrcVT = Tmp1.getValueType();
2105  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
2106  SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
2107  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
2108  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2109  return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
2110}
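// In other words, the lowering above computes roughly
//   result = (Tmp1 < 0.0) ? -fabs(Tmp0) : fabs(Tmp0)
// using one VFP compare of the sign source against +0.0 and a conditional
// negate (ARMISD::CNEG) predicated on LT.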
2111
2112SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2113  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2114  MFI->setFrameAddressIsTaken(true);
2115  EVT VT = Op.getValueType();
2116  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
2117  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2118  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
2119    ? ARM::R7 : ARM::R11;
2120  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2121  while (Depth--)
2122    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0,
2123                            false, false, 0);
2124  return FrameAddr;
2125}
2126
2127/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to
2128/// expand a bit convert where either the source or destination type is i64 to
2129/// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
2130/// operand type is illegal (e.g., v2f32 for a target that doesn't support
2131/// vectors), since the legalizer won't know what to do with that.
2132static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
2133  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2134  DebugLoc dl = N->getDebugLoc();
2135  SDValue Op = N->getOperand(0);
2136
2137  // This function is only supposed to be called for i64 types, either as the
2138  // source or destination of the bit convert.
2139  EVT SrcVT = Op.getValueType();
2140  EVT DstVT = N->getValueType(0);
2141  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
2142         "ExpandBIT_CONVERT called for non-i64 type");
2143
2144  // Turn i64->f64 into VMOVDRR.
2145  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
2146    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
2147                             DAG.getConstant(0, MVT::i32));
2148    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
2149                             DAG.getConstant(1, MVT::i32));
2150    return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2151  }
2152
2153  // Turn f64->i64 into VMOVRRD.
2154  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
2155    SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
2156                              DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
2157    // Merge the pieces into a single i64 value.
2158    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
2159  }
2160
2161  return SDValue();
2162}
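// For example, "f64 = bit_convert i64 x" becomes
//   lo = extract_element x, 0 ; hi = extract_element x, 1
//   f64 = ARMISD::VMOVDRR lo, hi
// and the reverse direction uses ARMISD::VMOVRRD plus BUILD_PAIR. These map
// onto the VFP transfers that move a D register to/from a GPR pair.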
2163
2164/// getZeroVector - Returns a vector of specified type with all zero elements.
2165///
2166static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2167  assert(VT.isVector() && "Expected a vector type");
2168
2169  // Zero vectors are used to represent vector negation and in those cases
2170  // will be implemented with the NEON VNEG instruction.  However, VNEG does
2171  // not support i64 elements, so sometimes the zero vectors will need to be
2172  // explicitly constructed.  For those cases, and potentially other uses in
2173  // the future, always build zero vectors as <16 x i8> or <8 x i8> bitcasted
2174  // to their dest type.  This ensures they get CSE'd.
2175  SDValue Vec;
2176  SDValue Cst = DAG.getTargetConstant(0, MVT::i8);
2177  SmallVector<SDValue, 8> Ops;
2178  MVT TVT;
2179
2180  if (VT.getSizeInBits() == 64) {
2181    Ops.assign(8, Cst); TVT = MVT::v8i8;
2182  } else {
2183    Ops.assign(16, Cst); TVT = MVT::v16i8;
2184  }
2185  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
2186
2187  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
2188}
2189
2190/// getOnesVector - Returns a vector of specified type with all bits set.
2191///
2192static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2193  assert(VT.isVector() && "Expected a vector type");
2194
2195  // Always build ones vectors as <16 x i8> or <8 x i8> bitcasted to their
2196  // dest type. This ensures they get CSE'd.
2197  SDValue Vec;
2198  SDValue Cst = DAG.getTargetConstant(0xFF, MVT::i8);
2199  SmallVector<SDValue, 8> Ops;
2200  MVT TVT;
2201
2202  if (VT.getSizeInBits() == 64) {
2203    Ops.assign(8, Cst); TVT = MVT::v8i8;
2204  } else {
2205    Ops.assign(16, Cst); TVT = MVT::v16i8;
2206  }
2207  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
2208
2209  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
2210}
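// For instance, a v2i32 zero vector is built by getZeroVector above as a
// v8i8 BUILD_VECTOR of eight zero bytes and then bitcast to v2i32; because
// every 64-bit zero (or all-ones) vector shares the same canonical
// v8i8/v16i8 node, the DAG CSEs them to a single value regardless of the
// requested element type.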
2211
2212/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
2213/// i32 values and take a 2 x i32 value to shift plus a shift amount.
2214SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
2215                                                SelectionDAG &DAG) const {
2216  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2217  EVT VT = Op.getValueType();
2218  unsigned VTBits = VT.getSizeInBits();
2219  DebugLoc dl = Op.getDebugLoc();
2220  SDValue ShOpLo = Op.getOperand(0);
2221  SDValue ShOpHi = Op.getOperand(1);
2222  SDValue ShAmt  = Op.getOperand(2);
2223  SDValue ARMCC;
2224  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2225
2226  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2227
2228  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2229                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
2230  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2231  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2232                                   DAG.getConstant(VTBits, MVT::i32));
2233  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2234  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2235  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2236
2237  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2238  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
2239                          ARMCC, DAG, dl);
2240  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2241  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC,
2242                           CCR, Cmp);
2243
2244  SDValue Ops[2] = { Lo, Hi };
2245  return DAG.getMergeValues(Ops, 2, dl);
2246}
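// Conceptually, for a 64-bit value split into (lo, hi) and shift amount n:
//   lo' = (n < 32) ? (lo >> n) | (hi << (32 - n))
//                  : hi >> (n - 32)       // arithmetic or logical as needed
//   hi' = hi >> n
// The CMOV above selects between the two lo' forms based on whether
// (n - 32) is non-negative.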
2247
2248/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
2249/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
2250SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
2251                                               SelectionDAG &DAG) const {
2252  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2253  EVT VT = Op.getValueType();
2254  unsigned VTBits = VT.getSizeInBits();
2255  DebugLoc dl = Op.getDebugLoc();
2256  SDValue ShOpLo = Op.getOperand(0);
2257  SDValue ShOpHi = Op.getOperand(1);
2258  SDValue ShAmt  = Op.getOperand(2);
2259  SDValue ARMCC;
2260
2261  assert(Op.getOpcode() == ISD::SHL_PARTS);
2262  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2263                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
2264  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2265  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2266                                   DAG.getConstant(VTBits, MVT::i32));
2267  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2268  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2269
2270  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2271  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2272  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
2273                          ARMCC, DAG, dl);
2274  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2275  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
2276                           CCR, Cmp);
2277
2278  SDValue Ops[2] = { Lo, Hi };
2279  return DAG.getMergeValues(Ops, 2, dl);
2280}
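// Symmetric to the right-shift case:
//   hi' = (n < 32) ? (hi << n) | (lo >> (32 - n))
//                  : lo << (n - 32)
//   lo' = lo << n
// again using a CMOV on the sign of (n - 32) to pick the hi' form.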
2281
2282static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
2283                         const ARMSubtarget *ST) {
2284  EVT VT = N->getValueType(0);
2285  DebugLoc dl = N->getDebugLoc();
2286
2287  if (!ST->hasV6T2Ops())
2288    return SDValue();
2289
2290  SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
2291  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
2292}
2293
2294static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
2295                          const ARMSubtarget *ST) {
2296  EVT VT = N->getValueType(0);
2297  DebugLoc dl = N->getDebugLoc();
2298
2299  // Lower vector shifts on NEON to use VSHL.
2300  if (VT.isVector()) {
2301    assert(ST->hasNEON() && "unexpected vector shift");
2302
2303    // Left shifts translate directly to the vshiftu intrinsic.
2304    if (N->getOpcode() == ISD::SHL)
2305      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2306                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
2307                         N->getOperand(0), N->getOperand(1));
2308
2309    assert((N->getOpcode() == ISD::SRA ||
2310            N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
2311
2312    // NEON uses the same intrinsics for both left and right shifts.  For
2313    // right shifts, the shift amounts are negative, so negate the vector of
2314    // shift amounts.
2315    EVT ShiftVT = N->getOperand(1).getValueType();
2316    SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
2317                                       getZeroVector(ShiftVT, DAG, dl),
2318                                       N->getOperand(1));
2319    Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
2320                               Intrinsic::arm_neon_vshifts :
2321                               Intrinsic::arm_neon_vshiftu);
2322    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2323                       DAG.getConstant(vshiftInt, MVT::i32),
2324                       N->getOperand(0), NegatedCount);
2325  }
2326
2327  // We can get here for a node like i32 = ISD::SHL i32, i64
2328  if (VT != MVT::i64)
2329    return SDValue();
2330
2331  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
2332         "Unknown shift to lower!");
2333
2334  // We only lower SRA and SRL by 1 here; all others use generic lowering.
2335  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
2336      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
2337    return SDValue();
2338
2339  // If we are in Thumb1 mode, we don't have RRX.
2340  if (ST->isThumb1Only()) return SDValue();
2341
2342  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
2343  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
2344                             DAG.getConstant(0, MVT::i32));
2345  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
2346                             DAG.getConstant(1, MVT::i32));
2347
2348  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
2349  // captures the result into a carry flag.
2350  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
2351  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
2352
2353  // The low part is an ARMISD::RRX operand, which shifts the carry in.
2354  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
2355
2356  // Merge the pieces into a single i64 value.
2357  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
2358}
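// Two examples of the cases handled above:
//   * a NEON "srl <4 x i32> x, y" becomes vshiftu(x, 0 - y), since the VSHL
//     family shifts left for positive counts and right for negative ones;
//   * an "i64 srl x, 1" becomes hi' = SRL_FLAG(hi), which shifts bit 0 of hi
//     into the carry, followed by lo' = RRX(lo), which rotates that carry
//     into bit 31 of the low word.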
2359
2360static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
2361  SDValue TmpOp0, TmpOp1;
2362  bool Invert = false;
2363  bool Swap = false;
2364  unsigned Opc = 0;
2365
2366  SDValue Op0 = Op.getOperand(0);
2367  SDValue Op1 = Op.getOperand(1);
2368  SDValue CC = Op.getOperand(2);
2369  EVT VT = Op.getValueType();
2370  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
2371  DebugLoc dl = Op.getDebugLoc();
2372
2373  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
2374    switch (SetCCOpcode) {
2375    default: llvm_unreachable("Illegal FP comparison"); break;
2376    case ISD::SETUNE:
2377    case ISD::SETNE:  Invert = true; // Fallthrough
2378    case ISD::SETOEQ:
2379    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
2380    case ISD::SETOLT:
2381    case ISD::SETLT: Swap = true; // Fallthrough
2382    case ISD::SETOGT:
2383    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
2384    case ISD::SETOLE:
2385    case ISD::SETLE:  Swap = true; // Fallthrough
2386    case ISD::SETOGE:
2387    case ISD::SETGE: Opc = ARMISD::VCGE; break;
2388    case ISD::SETUGE: Swap = true; // Fallthrough
2389    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
2390    case ISD::SETUGT: Swap = true; // Fallthrough
2391    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
2392    case ISD::SETUEQ: Invert = true; // Fallthrough
2393    case ISD::SETONE:
2394      // Expand this to (OLT | OGT).
2395      TmpOp0 = Op0;
2396      TmpOp1 = Op1;
2397      Opc = ISD::OR;
2398      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
2399      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
2400      break;
2401    case ISD::SETUO: Invert = true; // Fallthrough
2402    case ISD::SETO:
2403      // Expand this to (OLT | OGE).
2404      TmpOp0 = Op0;
2405      TmpOp1 = Op1;
2406      Opc = ISD::OR;
2407      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
2408      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
2409      break;
2410    }
2411  } else {
2412    // Integer comparisons.
2413    switch (SetCCOpcode) {
2414    default: llvm_unreachable("Illegal integer comparison"); break;
2415    case ISD::SETNE:  Invert = true;
2416    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
2417    case ISD::SETLT:  Swap = true;
2418    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
2419    case ISD::SETLE:  Swap = true;
2420    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
2421    case ISD::SETULT: Swap = true;
2422    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
2423    case ISD::SETULE: Swap = true;
2424    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
2425    }
2426
2427    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
2428    if (Opc == ARMISD::VCEQ) {
2429
2430      SDValue AndOp;
2431      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
2432        AndOp = Op0;
2433      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
2434        AndOp = Op1;
2435
2436      // Ignore bitconvert.
2437      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
2438        AndOp = AndOp.getOperand(0);
2439
2440      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
2441        Opc = ARMISD::VTST;
2442        Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
2443        Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
2444        Invert = !Invert;
2445      }
2446    }
2447  }
2448
2449  if (Swap)
2450    std::swap(Op0, Op1);
2451
2452  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
2453
2454  if (Invert)
2455    Result = DAG.getNOT(dl, Result, VT);
2456
2457  return Result;
2458}
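// A few concrete mappings produced by the switch above:
//   setone a, b          ->  VCGT(b, a) | VCGT(a, b)      (OLT | OGT)
//   setueq a, b          ->  ~(VCGT(b, a) | VCGT(a, b))   (inverted SETONE)
//   setne (and a, b), 0  ->  VTST(a, b)                   (vector test bits)
// with the Swap/Invert flags handling the remaining orderings.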
2459
2460/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
2461/// VMOV instruction, and if so, return the constant being splatted.
2462static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
2463                           unsigned SplatBitSize, SelectionDAG &DAG) {
2464  switch (SplatBitSize) {
2465  case 8:
2466    // Any 1-byte value is OK.
2467    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
2468    return DAG.getTargetConstant(SplatBits, MVT::i8);
2469
2470  case 16:
2471    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
2472    if ((SplatBits & ~0xff) == 0 ||
2473        (SplatBits & ~0xff00) == 0)
2474      return DAG.getTargetConstant(SplatBits, MVT::i16);
2475    break;
2476
2477  case 32:
2478    // NEON's 32-bit VMOV supports splat values where:
2479    // * only one byte is nonzero, or
2480    // * the least significant byte is 0xff and the second byte is nonzero, or
2481    // * the least significant 2 bytes are 0xff and the third is nonzero.
2482    if ((SplatBits & ~0xff) == 0 ||
2483        (SplatBits & ~0xff00) == 0 ||
2484        (SplatBits & ~0xff0000) == 0 ||
2485        (SplatBits & ~0xff000000) == 0)
2486      return DAG.getTargetConstant(SplatBits, MVT::i32);
2487
2488    if ((SplatBits & ~0xffff) == 0 &&
2489        ((SplatBits | SplatUndef) & 0xff) == 0xff)
2490      return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
2491
2492    if ((SplatBits & ~0xffffff) == 0 &&
2493        ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
2494      return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
2495
2496    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
2497    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
2498    // VMOV.I32.  A (very) minor optimization would be to replicate the value
2499    // and fall through here to test for a valid 64-bit splat.  But, then the
2500    // caller would also need to check and handle the change in size.
2501    break;
2502
2503  case 64: {
2504    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
2505    uint64_t BitMask = 0xff;
2506    uint64_t Val = 0;
2507    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
2508      if (((SplatBits | SplatUndef) & BitMask) == BitMask)
2509        Val |= BitMask;
2510      else if ((SplatBits & BitMask) != 0)
2511        return SDValue();
2512      BitMask <<= 8;
2513    }
2514    return DAG.getTargetConstant(Val, MVT::i64);
2515  }
2516
2517  default:
2518    llvm_unreachable("unexpected size for isVMOVSplat");
2519    break;
2520  }
2521
2522  return SDValue();
2523}
2524
2525/// getVMOVImm - If this is a build_vector of constants which can be
2526/// formed by using a VMOV instruction of the specified element size,
2527/// return the constant being splatted.  The ByteSize field indicates the
2528/// number of bytes of each element (1, 2, 4, or 8).
2529SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2530  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
2531  APInt SplatBits, SplatUndef;
2532  unsigned SplatBitSize;
2533  bool HasAnyUndefs;
2534  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
2535                                      HasAnyUndefs, ByteSize * 8))
2536    return SDValue();
2537
2538  if (SplatBitSize > ByteSize * 8)
2539    return SDValue();
2540
2541  return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
2542                     SplatBitSize, DAG);
2543}
2544
2545static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
2546                       bool &ReverseVEXT, unsigned &Imm) {
2547  unsigned NumElts = VT.getVectorNumElements();
2548  ReverseVEXT = false;
2549  Imm = M[0];
2550
2551  // If this is a VEXT shuffle, the immediate value is the index of the first
2552  // element.  The other shuffle indices must be the successive elements after
2553  // the first one.
2554  unsigned ExpectedElt = Imm;
2555  for (unsigned i = 1; i < NumElts; ++i) {
2556    // Increment the expected index.  If it wraps around, it may still be
2557    // a VEXT but the source vectors must be swapped.
2558    ExpectedElt += 1;
2559    if (ExpectedElt == NumElts * 2) {
2560      ExpectedElt = 0;
2561      ReverseVEXT = true;
2562    }
2563
2564    if (ExpectedElt != static_cast<unsigned>(M[i]))
2565      return false;
2566  }
2567
2568  // Adjust the index value if the source operands will be swapped.
2569  if (ReverseVEXT)
2570    Imm -= NumElts;
2571
2572  return true;
2573}
2574
2575/// isVREVMask - Check if a vector shuffle corresponds to a VREV
2576/// instruction with the specified blocksize.  (The order of the elements
2577/// within each block of the vector is reversed.)
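/// For example, with a v4i16 vector, a VREV64.16 shuffle reverses each
/// 64-bit block of four elements and has the mask <3, 2, 1, 0>, while a
/// VREV32.16 shuffle reverses each 32-bit pair and has the mask <1, 0, 3, 2>.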
2578static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
2579                       unsigned BlockSize) {
2580  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
2581         "Only possible block sizes for VREV are: 16, 32, 64");
2582
2583  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2584  if (EltSz == 64)
2585    return false;
2586
2587  unsigned NumElts = VT.getVectorNumElements();
2588  unsigned BlockElts = M[0] + 1;
2589
2590  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
2591    return false;
2592
2593  for (unsigned i = 0; i < NumElts; ++i) {
2594    if ((unsigned) M[i] !=
2595        (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
2596      return false;
2597  }
2598
2599  return true;
2600}
2601
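/// isVTRNMask - Check if a vector shuffle corresponds to one result of a VTRN
/// instruction, which transposes corresponding 2-element groups of the two
/// source vectors.  E.g., for v4i16 the two results have the masks
/// <0, 4, 2, 6> and <1, 5, 3, 7>.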
2602static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
2603                       unsigned &WhichResult) {
2604  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2605  if (EltSz == 64)
2606    return false;
2607
2608  unsigned NumElts = VT.getVectorNumElements();
2609  WhichResult = (M[0] == 0 ? 0 : 1);
2610  for (unsigned i = 0; i < NumElts; i += 2) {
2611    if ((unsigned) M[i] != i + WhichResult ||
2612        (unsigned) M[i+1] != i + NumElts + WhichResult)
2613      return false;
2614  }
2615  return true;
2616}
2617
2618/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
2619/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
2620/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
2621static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
2622                                unsigned &WhichResult) {
2623  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2624  if (EltSz == 64)
2625    return false;
2626
2627  unsigned NumElts = VT.getVectorNumElements();
2628  WhichResult = (M[0] == 0 ? 0 : 1);
2629  for (unsigned i = 0; i < NumElts; i += 2) {
2630    if ((unsigned) M[i] != i + WhichResult ||
2631        (unsigned) M[i+1] != i + WhichResult)
2632      return false;
2633  }
2634  return true;
2635}
2636
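/// isVUZPMask - Check if a vector shuffle corresponds to one result of a VUZP
/// (unzip) instruction, which de-interleaves the elements of the two source
/// vectors.  E.g., for v4i16 the two results have the masks <0, 2, 4, 6> and
/// <1, 3, 5, 7>.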
2637static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
2638                       unsigned &WhichResult) {
2639  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2640  if (EltSz == 64)
2641    return false;
2642
2643  unsigned NumElts = VT.getVectorNumElements();
2644  WhichResult = (M[0] == 0 ? 0 : 1);
2645  for (unsigned i = 0; i != NumElts; ++i) {
2646    if ((unsigned) M[i] != 2 * i + WhichResult)
2647      return false;
2648  }
2649
2650  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2651  if (VT.is64BitVector() && EltSz == 32)
2652    return false;
2653
2654  return true;
2655}
2656
2657/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
2658/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
2659/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
2660static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
2661                                unsigned &WhichResult) {
2662  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2663  if (EltSz == 64)
2664    return false;
2665
2666  unsigned Half = VT.getVectorNumElements() / 2;
2667  WhichResult = (M[0] == 0 ? 0 : 1);
2668  for (unsigned j = 0; j != 2; ++j) {
2669    unsigned Idx = WhichResult;
2670    for (unsigned i = 0; i != Half; ++i) {
2671      if ((unsigned) M[i + j * Half] != Idx)
2672        return false;
2673      Idx += 2;
2674    }
2675  }
2676
2677  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2678  if (VT.is64BitVector() && EltSz == 32)
2679    return false;
2680
2681  return true;
2682}
2683
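/// isVZIPMask - Check if a vector shuffle corresponds to one result of a VZIP
/// (zip) instruction, which interleaves the elements of the two source
/// vectors.  E.g., for v4i16 the two results have the masks <0, 4, 1, 5> and
/// <2, 6, 3, 7>.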
2684static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
2685                       unsigned &WhichResult) {
2686  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2687  if (EltSz == 64)
2688    return false;
2689
2690  unsigned NumElts = VT.getVectorNumElements();
2691  WhichResult = (M[0] == 0 ? 0 : 1);
2692  unsigned Idx = WhichResult * NumElts / 2;
2693  for (unsigned i = 0; i != NumElts; i += 2) {
2694    if ((unsigned) M[i] != Idx ||
2695        (unsigned) M[i+1] != Idx + NumElts)
2696      return false;
2697    Idx += 1;
2698  }
2699
2700  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2701  if (VT.is64BitVector() && EltSz == 32)
2702    return false;
2703
2704  return true;
2705}
2706
2707/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
2708/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
2709/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
2710static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
2711                                unsigned &WhichResult) {
2712  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2713  if (EltSz == 64)
2714    return false;
2715
2716  unsigned NumElts = VT.getVectorNumElements();
2717  WhichResult = (M[0] == 0 ? 0 : 1);
2718  unsigned Idx = WhichResult * NumElts / 2;
2719  for (unsigned i = 0; i != NumElts; i += 2) {
2720    if ((unsigned) M[i] != Idx ||
2721        (unsigned) M[i+1] != Idx)
2722      return false;
2723    Idx += 1;
2724  }
2725
2726  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2727  if (VT.is64BitVector() && EltSz == 32)
2728    return false;
2729
2730  return true;
2731}
2732
2733
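/// BuildSplat - Build a canonical splat of Val: a BUILD_VECTOR whose element
/// size matches the size of Val, bit-converted to the requested vector type.
/// All-zeros and all-ones values are canonicalized via getZeroVector and
/// getOnesVector, respectively.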
2734static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2735  // Canonicalize all-zeros and all-ones vectors.
2736  ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
2737  if (ConstVal->isNullValue())
2738    return getZeroVector(VT, DAG, dl);
2739  if (ConstVal->isAllOnesValue())
2740    return getOnesVector(VT, DAG, dl);
2741
2742  EVT CanonicalVT;
2743  if (VT.is64BitVector()) {
2744    switch (Val.getValueType().getSizeInBits()) {
2745    case 8:  CanonicalVT = MVT::v8i8; break;
2746    case 16: CanonicalVT = MVT::v4i16; break;
2747    case 32: CanonicalVT = MVT::v2i32; break;
2748    case 64: CanonicalVT = MVT::v1i64; break;
2749    default: llvm_unreachable("unexpected splat element type"); break;
2750    }
2751  } else {
2752    assert(VT.is128BitVector() && "unknown splat vector size");
2753    switch (Val.getValueType().getSizeInBits()) {
2754    case 8:  CanonicalVT = MVT::v16i8; break;
2755    case 16: CanonicalVT = MVT::v8i16; break;
2756    case 32: CanonicalVT = MVT::v4i32; break;
2757    case 64: CanonicalVT = MVT::v2i64; break;
2758    default: llvm_unreachable("unexpected splat element type"); break;
2759    }
2760  }
2761
2762  // Build a canonical splat for this value.
2763  SmallVector<SDValue, 8> Ops;
2764  Ops.assign(CanonicalVT.getVectorNumElements(), Val);
2765  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
2766                            Ops.size());
2767  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
2768}
2769
2770// If this is a case we can't handle, return null and let the default
2771// expansion code take care of it.
2772static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
2773  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
2774  DebugLoc dl = Op.getDebugLoc();
2775  EVT VT = Op.getValueType();
2776
2777  APInt SplatBits, SplatUndef;
2778  unsigned SplatBitSize;
2779  bool HasAnyUndefs;
2780  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
2781    if (SplatBitSize <= 64) {
2782      SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
2783                                SplatUndef.getZExtValue(), SplatBitSize, DAG);
2784      if (Val.getNode())
2785        return BuildSplat(Val, VT, DAG, dl);
2786    }
2787  }
2788
2789  // If there are only 2 elements in a 128-bit vector, insert them into an
2790  // undef vector.  This handles the common case for 128-bit vector argument
2791  // passing, where the insertions should be translated to subreg accesses
2792  // with no real instructions.
2793  if (VT.is128BitVector() && Op.getNumOperands() == 2) {
2794    SDValue Val = DAG.getUNDEF(VT);
2795    SDValue Op0 = Op.getOperand(0);
2796    SDValue Op1 = Op.getOperand(1);
2797    if (Op0.getOpcode() != ISD::UNDEF)
2798      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
2799                        DAG.getIntPtrConstant(0));
2800    if (Op1.getOpcode() != ISD::UNDEF)
2801      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
2802                        DAG.getIntPtrConstant(1));
2803    return Val;
2804  }
2805
2806  return SDValue();
2807}
2808
2809/// isShuffleMaskLegal - Targets can use this to indicate that they only
2810/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
2811/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
2812/// are assumed to be legal.
2813bool
2814ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
2815                                      EVT VT) const {
2816  if (VT.getVectorNumElements() == 4 &&
2817      (VT.is128BitVector() || VT.is64BitVector())) {
2818    unsigned PFIndexes[4];
2819    for (unsigned i = 0; i != 4; ++i) {
2820      if (M[i] < 0)
2821        PFIndexes[i] = 8;
2822      else
2823        PFIndexes[i] = M[i];
2824    }
2825
2826    // Compute the index in the perfect shuffle table.
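    // Each index is in the range [0,8], with 8 representing an undef element,
    // so the four indices together form a base-9 number.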
2827    unsigned PFTableIndex =
2828      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
2829    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
2830    unsigned Cost = (PFEntry >> 30);
2831
2832    if (Cost <= 4)
2833      return true;
2834  }
2835
2836  bool ReverseVEXT;
2837  unsigned Imm, WhichResult;
2838
2839  return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
2840          isVREVMask(M, VT, 64) ||
2841          isVREVMask(M, VT, 32) ||
2842          isVREVMask(M, VT, 16) ||
2843          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
2844          isVTRNMask(M, VT, WhichResult) ||
2845          isVUZPMask(M, VT, WhichResult) ||
2846          isVZIPMask(M, VT, WhichResult) ||
2847          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
2848          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
2849          isVZIP_v_undef_Mask(M, VT, WhichResult));
2850}
2851
2852/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
2853/// the specified operations to build the shuffle.
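/// Each 32-bit table entry packs, from the most significant bits down: a
/// 2-bit cost, a 4-bit opcode (one of the OP_* values below), a 13-bit index
/// describing how to build the left-hand operand, and a 13-bit index
/// describing how to build the right-hand operand.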
2854static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
2855                                      SDValue RHS, SelectionDAG &DAG,
2856                                      DebugLoc dl) {
2857  unsigned OpNum = (PFEntry >> 26) & 0x0F;
2858  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
2859  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
2860
2861  enum {
2862    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
2863    OP_VREV,
2864    OP_VDUP0,
2865    OP_VDUP1,
2866    OP_VDUP2,
2867    OP_VDUP3,
2868    OP_VEXT1,
2869    OP_VEXT2,
2870    OP_VEXT3,
2871    OP_VUZPL, // VUZP, left result
2872    OP_VUZPR, // VUZP, right result
2873    OP_VZIPL, // VZIP, left result
2874    OP_VZIPR, // VZIP, right result
2875    OP_VTRNL, // VTRN, left result
2876    OP_VTRNR  // VTRN, right result
2877  };
2878
2879  if (OpNum == OP_COPY) {
2880    if (LHSID == (1*9+2)*9+3) return LHS;
2881    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
2882    return RHS;
2883  }
2884
2885  SDValue OpLHS, OpRHS;
2886  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
2887  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
2888  EVT VT = OpLHS.getValueType();
2889
2890  switch (OpNum) {
2891  default: llvm_unreachable("Unknown shuffle opcode!");
2892  case OP_VREV:
2893    return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
2894  case OP_VDUP0:
2895  case OP_VDUP1:
2896  case OP_VDUP2:
2897  case OP_VDUP3:
2898    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
2899                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
2900  case OP_VEXT1:
2901  case OP_VEXT2:
2902  case OP_VEXT3:
2903    return DAG.getNode(ARMISD::VEXT, dl, VT,
2904                       OpLHS, OpRHS,
2905                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
2906  case OP_VUZPL:
2907  case OP_VUZPR:
2908    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
2909                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
2910  case OP_VZIPL:
2911  case OP_VZIPR:
2912    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
2913                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
2914  case OP_VTRNL:
2915  case OP_VTRNR:
2916    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
2917                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
2918  }
2919}
2920
2921static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
2922  SDValue V1 = Op.getOperand(0);
2923  SDValue V2 = Op.getOperand(1);
2924  DebugLoc dl = Op.getDebugLoc();
2925  EVT VT = Op.getValueType();
2926  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2927  SmallVector<int, 8> ShuffleMask;
2928
2929  // Convert shuffles that are directly supported on NEON to target-specific
2930  // DAG nodes, instead of keeping them as shuffles and matching them again
2931  // during code selection.  This is more efficient and avoids the possibility
2932  // of inconsistencies between legalization and selection.
2933  // FIXME: floating-point vectors should be canonicalized to integer vectors
2934  // of the same type so that they get CSEd properly.
2935  SVN->getMask(ShuffleMask);
2936
2937  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
2938    int Lane = SVN->getSplatIndex();
2939    // If this is undef splat, generate it via "just" vdup, if possible.
2940    // For an undef splat, use lane 0 so a plain VDUP can be used if possible.
2941
2942    if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
2943      return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
2944    }
2945    return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
2946                       DAG.getConstant(Lane, MVT::i32));
2947  }
2948
2949  bool ReverseVEXT;
2950  unsigned Imm;
2951  if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
2952    if (ReverseVEXT)
2953      std::swap(V1, V2);
2954    return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
2955                       DAG.getConstant(Imm, MVT::i32));
2956  }
2957
2958  if (isVREVMask(ShuffleMask, VT, 64))
2959    return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
2960  if (isVREVMask(ShuffleMask, VT, 32))
2961    return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
2962  if (isVREVMask(ShuffleMask, VT, 16))
2963    return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
2964
2965  // Check for Neon shuffles that modify both input vectors in place.
2966  // If both results are used, i.e., if there are two shuffles with the same
2967  // source operands and with masks corresponding to both results of one of
2968  // these operations, DAG memoization will ensure that a single node is
2969  // used for both shuffles.
2970  unsigned WhichResult;
2971  if (isVTRNMask(ShuffleMask, VT, WhichResult))
2972    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
2973                       V1, V2).getValue(WhichResult);
2974  if (isVUZPMask(ShuffleMask, VT, WhichResult))
2975    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
2976                       V1, V2).getValue(WhichResult);
2977  if (isVZIPMask(ShuffleMask, VT, WhichResult))
2978    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
2979                       V1, V2).getValue(WhichResult);
2980
2981  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
2982    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
2983                       V1, V1).getValue(WhichResult);
2984  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
2985    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
2986                       V1, V1).getValue(WhichResult);
2987  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
2988    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
2989                       V1, V1).getValue(WhichResult);
2990
2991  // If the shuffle is not directly supported and it has 4 elements, use
2992  // the PerfectShuffle-generated table to synthesize it from other shuffles.
2993  if (VT.getVectorNumElements() == 4 &&
2994      (VT.is128BitVector() || VT.is64BitVector())) {
2995    unsigned PFIndexes[4];
2996    for (unsigned i = 0; i != 4; ++i) {
2997      if (ShuffleMask[i] < 0)
2998        PFIndexes[i] = 8;
2999      else
3000        PFIndexes[i] = ShuffleMask[i];
3001    }
3002
3003    // Compute the index in the perfect shuffle table.
3004    unsigned PFTableIndex =
3005      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
3006
3007    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
3008    unsigned Cost = (PFEntry >> 30);
3009
3010    if (Cost <= 4)
3011      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
3012  }
3013
3014  return SDValue();
3015}
3016
3017static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
3018  EVT VT = Op.getValueType();
3019  DebugLoc dl = Op.getDebugLoc();
3020  SDValue Vec = Op.getOperand(0);
3021  SDValue Lane = Op.getOperand(1);
3022  assert(VT == MVT::i32 &&
3023         Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
3024         "unexpected type for custom-lowering vector extract");
3025  return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
3026}
3027
3028static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
3029  // The only time a CONCAT_VECTORS operation can have legal types is when
3030  // two 64-bit vectors are concatenated to a 128-bit vector.
3031  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
3032         "unexpected CONCAT_VECTORS");
3033  DebugLoc dl = Op.getDebugLoc();
3034  SDValue Val = DAG.getUNDEF(MVT::v2f64);
3035  SDValue Op0 = Op.getOperand(0);
3036  SDValue Op1 = Op.getOperand(1);
3037  if (Op0.getOpcode() != ISD::UNDEF)
3038    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
3039                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
3040                      DAG.getIntPtrConstant(0));
3041  if (Op1.getOpcode() != ISD::UNDEF)
3042    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
3043                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
3044                      DAG.getIntPtrConstant(1));
3045  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
3046}
3047
3048SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3049  switch (Op.getOpcode()) {
3050  default: llvm_unreachable("Don't know how to custom lower this!");
3051  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
3052  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
3053  case ISD::GlobalAddress:
3054    return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
3055      LowerGlobalAddressELF(Op, DAG);
3056  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
3057  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
3058  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
3059  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
3060  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3061  case ISD::VASTART:       return LowerVASTART(Op, DAG);
3062  case ISD::MEMBARRIER:    return LowerMEMBARRIER(Op, DAG, Subtarget);
3063  case ISD::SINT_TO_FP:
3064  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
3065  case ISD::FP_TO_SINT:
3066  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
3067  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
3068  case ISD::RETURNADDR:    break;
3069  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
3070  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3071  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
3072                                                               Subtarget);
3073  case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
3074  case ISD::SHL:
3075  case ISD::SRL:
3076  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
3077  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
3078  case ISD::SRL_PARTS:
3079  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
3080  case ISD::CTTZ:          return LowerCTTZ(Op.getNode(), DAG, Subtarget);
3081  case ISD::VSETCC:        return LowerVSETCC(Op, DAG);
3082  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG);
3083  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3084  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3085  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3086  }
3087  return SDValue();
3088}
3089
3090/// ReplaceNodeResults - Replace the results of node with an illegal result
3091/// type with new values built out of custom code.
3092void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
3093                                           SmallVectorImpl<SDValue>&Results,
3094                                           SelectionDAG &DAG) const {
3095  SDValue Res;
3096  switch (N->getOpcode()) {
3097  default:
3098    llvm_unreachable("Don't know how to custom expand this!");
3099    break;
3100  case ISD::BIT_CONVERT:
3101    Res = ExpandBIT_CONVERT(N, DAG);
3102    break;
3103  case ISD::SRL:
3104  case ISD::SRA:
3105    Res = LowerShift(N, DAG, Subtarget);
3106    break;
3107  }
3108  if (Res.getNode())
3109    Results.push_back(Res);
3110}
3111
3112//===----------------------------------------------------------------------===//
3113//                           ARM Scheduler Hooks
3114//===----------------------------------------------------------------------===//
3115
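/// EmitAtomicCmpSwap - Expand an atomic compare-and-swap pseudo instruction
/// into a load-exclusive/store-exclusive (LDREX/STREX) loop of the given
/// operand size in bytes, using a scratch register for the STREX status.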
3116MachineBasicBlock *
3117ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
3118                                     MachineBasicBlock *BB,
3119                                     unsigned Size) const {
3120  unsigned dest    = MI->getOperand(0).getReg();
3121  unsigned ptr     = MI->getOperand(1).getReg();
3122  unsigned oldval  = MI->getOperand(2).getReg();
3123  unsigned newval  = MI->getOperand(3).getReg();
3124  unsigned scratch = BB->getParent()->getRegInfo()
3125    .createVirtualRegister(ARM::GPRRegisterClass);
3126  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3127  DebugLoc dl = MI->getDebugLoc();
3128  bool isThumb2 = Subtarget->isThumb2();
3129
3130  unsigned ldrOpc, strOpc;
3131  switch (Size) {
3132  default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
3133  case 1:
3134    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
3135    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
3136    break;
3137  case 2:
3138    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
3139    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
3140    break;
3141  case 4:
3142    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
3143    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
3144    break;
3145  }
3146
3147  MachineFunction *MF = BB->getParent();
3148  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3149  MachineFunction::iterator It = BB;
3150  ++It; // insert the new blocks after the current block
3151
3152  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
3153  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
3154  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3155  MF->insert(It, loop1MBB);
3156  MF->insert(It, loop2MBB);
3157  MF->insert(It, exitMBB);
3158  exitMBB->transferSuccessors(BB);
3159
3160  //  thisMBB:
3161  //   ...
3162  //   fallthrough --> loop1MBB
3163  BB->addSuccessor(loop1MBB);
3164
3165  // loop1MBB:
3166  //   ldrex dest, [ptr]
3167  //   cmp dest, oldval
3168  //   bne exitMBB
3169  BB = loop1MBB;
3170  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
3171  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
3172                 .addReg(dest).addReg(oldval));
3173  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3174    .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3175  BB->addSuccessor(loop2MBB);
3176  BB->addSuccessor(exitMBB);
3177
3178  // loop2MBB:
3179  //   strex scratch, newval, [ptr]
3180  //   cmp scratch, #0
3181  //   bne loop1MBB
3182  BB = loop2MBB;
3183  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
3184                 .addReg(ptr));
3185  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
3186                 .addReg(scratch).addImm(0));
3187  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3188    .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3189  BB->addSuccessor(loop1MBB);
3190  BB->addSuccessor(exitMBB);
3191
3192  //  exitMBB:
3193  //   ...
3194  BB = exitMBB;
3195
3196  MF->DeleteMachineInstr(MI);   // The instruction is gone now.
3197
3198  return BB;
3199}
3200
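/// EmitAtomicBinary - Expand an atomic read-modify-write pseudo instruction
/// (or an atomic swap, when BinOpcode is 0) into an LDREX/<binop>/STREX loop
/// of the given operand size in bytes.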
3201MachineBasicBlock *
3202ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
3203                                    unsigned Size, unsigned BinOpcode) const {
3204  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3205  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3206
3207  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3208  MachineFunction *MF = BB->getParent();
3209  MachineFunction::iterator It = BB;
3210  ++It;
3211
3212  unsigned dest = MI->getOperand(0).getReg();
3213  unsigned ptr = MI->getOperand(1).getReg();
3214  unsigned incr = MI->getOperand(2).getReg();
3215  DebugLoc dl = MI->getDebugLoc();
3216
3217  bool isThumb2 = Subtarget->isThumb2();
3218  unsigned ldrOpc, strOpc;
3219  switch (Size) {
3220  default: llvm_unreachable("unsupported size for AtomicBinary!");
3221  case 1:
3222    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
3223    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
3224    break;
3225  case 2:
3226    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
3227    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
3228    break;
3229  case 4:
3230    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
3231    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
3232    break;
3233  }
3234
3235  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3236  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3237  MF->insert(It, loopMBB);
3238  MF->insert(It, exitMBB);
3239  exitMBB->transferSuccessors(BB);
3240
3241  MachineRegisterInfo &RegInfo = MF->getRegInfo();
3242  unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
3243  unsigned scratch2 = (!BinOpcode) ? incr :
3244    RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
3245
3246  //  thisMBB:
3247  //   ...
3248  //   fallthrough --> loopMBB
3249  BB->addSuccessor(loopMBB);
3250
3251  //  loopMBB:
3252  //   ldrex dest, ptr
3253  //   <binop> scratch2, dest, incr
3254  //   strex scratch, scratch2, ptr
3255  //   cmp scratch, #0
3256  //   bne- loopMBB
3257  //   fallthrough --> exitMBB
3258  BB = loopMBB;
3259  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
3260  if (BinOpcode) {
3261    // operand order needs to go the other way for NAND
3262    if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
3263      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
3264                     addReg(incr).addReg(dest)).addReg(0);
3265    else
3266      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
3267                     addReg(dest).addReg(incr)).addReg(0);
3268  }
3269
3270  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2)
3271                 .addReg(ptr));
3272  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
3273                 .addReg(scratch).addImm(0));
3274  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3275    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3276
3277  BB->addSuccessor(loopMBB);
3278  BB->addSuccessor(exitMBB);
3279
3280  //  exitMBB:
3281  //   ...
3282  BB = exitMBB;
3283
3284  MF->DeleteMachineInstr(MI);   // The instruction is gone now.
3285
3286  return BB;
3287}
3288
3289MachineBasicBlock *
3290ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
3291                                               MachineBasicBlock *BB) const {
3292  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3293  DebugLoc dl = MI->getDebugLoc();
3294  bool isThumb2 = Subtarget->isThumb2();
3295  switch (MI->getOpcode()) {
3296  default:
3297    MI->dump();
3298    llvm_unreachable("Unexpected instr type to insert");
3299
3300  case ARM::ATOMIC_LOAD_ADD_I8:
3301     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
3302  case ARM::ATOMIC_LOAD_ADD_I16:
3303     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
3304  case ARM::ATOMIC_LOAD_ADD_I32:
3305     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
3306
3307  case ARM::ATOMIC_LOAD_AND_I8:
3308     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
3309  case ARM::ATOMIC_LOAD_AND_I16:
3310     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
3311  case ARM::ATOMIC_LOAD_AND_I32:
3312     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
3313
3314  case ARM::ATOMIC_LOAD_OR_I8:
3315     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
3316  case ARM::ATOMIC_LOAD_OR_I16:
3317     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
3318  case ARM::ATOMIC_LOAD_OR_I32:
3319     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
3320
3321  case ARM::ATOMIC_LOAD_XOR_I8:
3322     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
3323  case ARM::ATOMIC_LOAD_XOR_I16:
3324     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
3325  case ARM::ATOMIC_LOAD_XOR_I32:
3326     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
3327
3328  case ARM::ATOMIC_LOAD_NAND_I8:
3329     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
3330  case ARM::ATOMIC_LOAD_NAND_I16:
3331     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
3332  case ARM::ATOMIC_LOAD_NAND_I32:
3333     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
3334
3335  case ARM::ATOMIC_LOAD_SUB_I8:
3336     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
3337  case ARM::ATOMIC_LOAD_SUB_I16:
3338     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
3339  case ARM::ATOMIC_LOAD_SUB_I32:
3340     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
3341
3342  case ARM::ATOMIC_SWAP_I8:  return EmitAtomicBinary(MI, BB, 1, 0);
3343  case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
3344  case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
3345
3346  case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(MI, BB, 1);
3347  case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
3348  case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
3349
3350  case ARM::tMOVCCr_pseudo: {
3351    // To "insert" a SELECT_CC instruction, we actually have to insert the
3352    // diamond control-flow pattern.  The incoming instruction knows the
3353    // destination vreg to set, the condition code register to branch on, the
3354    // true/false values to select between, and a branch opcode to use.
3355    const BasicBlock *LLVM_BB = BB->getBasicBlock();
3356    MachineFunction::iterator It = BB;
3357    ++It;
3358
3359    //  thisMBB:
3360    //  ...
3361    //   TrueVal = ...
3362    //   cmpTY ccX, r1, r2
3363    //   bCC copy1MBB
3364    //   fallthrough --> copy0MBB
3365    MachineBasicBlock *thisMBB  = BB;
3366    MachineFunction *F = BB->getParent();
3367    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
3368    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
3369    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
3370      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
3371    F->insert(It, copy0MBB);
3372    F->insert(It, sinkMBB);
3373    // Update machine-CFG edges by first adding all successors of the current
3374    // block to the new block which will contain the Phi node for the select.
3375    for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
3376           E = BB->succ_end(); I != E; ++I)
3377      sinkMBB->addSuccessor(*I);
3378    // Next, remove all successors of the current block, and add the true
3379    // and fallthrough blocks as its successors.
3380    while (!BB->succ_empty())
3381      BB->removeSuccessor(BB->succ_begin());
3382    BB->addSuccessor(copy0MBB);
3383    BB->addSuccessor(sinkMBB);
3384
3385    //  copy0MBB:
3386    //   %FalseValue = ...
3387    //   # fallthrough to sinkMBB
3388    BB = copy0MBB;
3389
3390    // Update machine-CFG edges
3391    BB->addSuccessor(sinkMBB);
3392
3393    //  sinkMBB:
3394    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
3395    //  ...
3396    BB = sinkMBB;
3397    BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
3398      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
3399      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
3400
3401    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
3402    return BB;
3403  }
3404
3405  case ARM::tANDsp:
3406  case ARM::tADDspr_:
3407  case ARM::tSUBspi_:
3408  case ARM::t2SUBrSPi_:
3409  case ARM::t2SUBrSPi12_:
3410  case ARM::t2SUBrSPs_: {
3411    MachineFunction *MF = BB->getParent();
3412    unsigned DstReg = MI->getOperand(0).getReg();
3413    unsigned SrcReg = MI->getOperand(1).getReg();
3414    bool DstIsDead = MI->getOperand(0).isDead();
3415    bool SrcIsKill = MI->getOperand(1).isKill();
3416
3417    if (SrcReg != ARM::SP) {
3418      // Copy the source to SP from virtual register.
3419      const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
3420      unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
3421        ? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
3422      BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
3423        .addReg(SrcReg, getKillRegState(SrcIsKill));
3424    }
3425
3426    unsigned OpOpc = 0;
3427    bool NeedPred = false, NeedCC = false, NeedOp3 = false;
3428    switch (MI->getOpcode()) {
3429    default:
3430      llvm_unreachable("Unexpected pseudo instruction!");
3431    case ARM::tANDsp:
3432      OpOpc = ARM::tAND;
3433      NeedPred = true;
3434      break;
3435    case ARM::tADDspr_:
3436      OpOpc = ARM::tADDspr;
3437      break;
3438    case ARM::tSUBspi_:
3439      OpOpc = ARM::tSUBspi;
3440      break;
3441    case ARM::t2SUBrSPi_:
3442      OpOpc = ARM::t2SUBrSPi;
3443      NeedPred = true; NeedCC = true;
3444      break;
3445    case ARM::t2SUBrSPi12_:
3446      OpOpc = ARM::t2SUBrSPi12;
3447      NeedPred = true;
3448      break;
3449    case ARM::t2SUBrSPs_:
3450      OpOpc = ARM::t2SUBrSPs;
3451      NeedPred = true; NeedCC = true; NeedOp3 = true;
3452      break;
3453    }
3454    MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
3455    if (OpOpc == ARM::tAND)
3456      AddDefaultT1CC(MIB);
3457    MIB.addReg(ARM::SP);
3458    MIB.addOperand(MI->getOperand(2));
3459    if (NeedOp3)
3460      MIB.addOperand(MI->getOperand(3));
3461    if (NeedPred)
3462      AddDefaultPred(MIB);
3463    if (NeedCC)
3464      AddDefaultCC(MIB);
3465
3466    // Copy the result from SP to virtual register.
3467    const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
3468    unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
3469      ? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
3470    BuildMI(BB, dl, TII->get(CopyOpc))
3471      .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
3472      .addReg(ARM::SP);
3473    MF->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
3474    return BB;
3475  }
3476  }
3477}
3478
3479//===----------------------------------------------------------------------===//
3480//                           ARM Optimization Hooks
3481//===----------------------------------------------------------------------===//
3482
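/// combineSelectAndUse - Given an ADD or SUB node N with a (select cc, 0, c)
/// operand Slct and the other operand OtherOp, try to fold the operation into
/// the select, e.g., (add x, (select cc, 0, c)) -> (select cc, x, (add x, c)).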
3483static
3484SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
3485                            TargetLowering::DAGCombinerInfo &DCI) {
3486  SelectionDAG &DAG = DCI.DAG;
3487  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3488  EVT VT = N->getValueType(0);
3489  unsigned Opc = N->getOpcode();
3490  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
3491  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
3492  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
3493  ISD::CondCode CC = ISD::SETCC_INVALID;
3494
3495  if (isSlctCC) {
3496    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
3497  } else {
3498    SDValue CCOp = Slct.getOperand(0);
3499    if (CCOp.getOpcode() == ISD::SETCC)
3500      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
3501  }
3502
3503  bool DoXform = false;
3504  bool InvCC = false;
3505  assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
3506          "Bad input!");
3507
3508  if (LHS.getOpcode() == ISD::Constant &&
3509      cast<ConstantSDNode>(LHS)->isNullValue()) {
3510    DoXform = true;
3511  } else if (CC != ISD::SETCC_INVALID &&
3512             RHS.getOpcode() == ISD::Constant &&
3513             cast<ConstantSDNode>(RHS)->isNullValue()) {
3514    std::swap(LHS, RHS);
3515    SDValue Op0 = Slct.getOperand(0);
3516    EVT OpVT = isSlctCC ? Op0.getValueType() :
3517                          Op0.getOperand(0).getValueType();
3518    bool isInt = OpVT.isInteger();
3519    CC = ISD::getSetCCInverse(CC, isInt);
3520
3521    if (!TLI.isCondCodeLegal(CC, OpVT))
3522      return SDValue();         // Inverse operator isn't legal.
3523
3524    DoXform = true;
3525    InvCC = true;
3526  }
3527
3528  if (DoXform) {
3529    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
3530    if (isSlctCC)
3531      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
3532                             Slct.getOperand(0), Slct.getOperand(1), CC);
3533    SDValue CCOp = Slct.getOperand(0);
3534    if (InvCC)
3535      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
3536                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
3537    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
3538                       CCOp, OtherOp, Result);
3539  }
3540  return SDValue();
3541}
3542
3543/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
3544static SDValue PerformADDCombine(SDNode *N,
3545                                 TargetLowering::DAGCombinerInfo &DCI) {
3546  // added by evan in r37685 with no testcase.
3547  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3548
3549  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
3550  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
3551    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
3552    if (Result.getNode()) return Result;
3553  }
3554  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
3555    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
3556    if (Result.getNode()) return Result;
3557  }
3558
3559  return SDValue();
3560}
3561
3562/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
3563static SDValue PerformSUBCombine(SDNode *N,
3564                                 TargetLowering::DAGCombinerInfo &DCI) {
3565  // added by evan in r37685 with no testcase.
3566  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3567
3568  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
3569  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
3570    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
3571    if (Result.getNode()) return Result;
3572  }
3573
3574  return SDValue();
3575}
3576
3577/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
3578/// ARMISD::VMOVRRD.
3579static SDValue PerformVMOVRRDCombine(SDNode *N,
3580                                   TargetLowering::DAGCombinerInfo &DCI) {
3581  // vmovrrd(vmovdrr x, y) -> x,y
3582  SDValue InDouble = N->getOperand(0);
3583  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
3584    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
3585  return SDValue();
3586}
3587
3588/// getVShiftImm - Check if this is a valid build_vector for the immediate
3589/// operand of a vector shift operation, where all the elements of the
3590/// build_vector must have the same constant integer value.
3591static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3592  // Ignore bit_converts.
3593  while (Op.getOpcode() == ISD::BIT_CONVERT)
3594    Op = Op.getOperand(0);
3595  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3596  APInt SplatBits, SplatUndef;
3597  unsigned SplatBitSize;
3598  bool HasAnyUndefs;
3599  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3600                                      HasAnyUndefs, ElementBits) ||
3601      SplatBitSize > ElementBits)
3602    return false;
3603  Cnt = SplatBits.getSExtValue();
3604  return true;
3605}
3606
3607/// isVShiftLImm - Check if this is a valid build_vector for the immediate
3608/// operand of a vector shift left operation.  That value must be in the range:
3609///   0 <= Value < ElementBits for a left shift; or
3610///   0 <= Value <= ElementBits for a long left shift.
3611static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
3612  assert(VT.isVector() && "vector shift count is not a vector type");
3613  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3614  if (! getVShiftImm(Op, ElementBits, Cnt))
3615    return false;
3616  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
3617}
3618
3619/// isVShiftRImm - Check if this is a valid build_vector for the immediate
3620/// operand of a vector shift right operation.  For a shift opcode, the value
3621/// is positive, but for an intrinsic the count must be negative. The
3622/// absolute value must be in the range:
3623///   1 <= |Value| <= ElementBits for a right shift; or
3624///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
3625static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
3626                         int64_t &Cnt) {
3627  assert(VT.isVector() && "vector shift count is not a vector type");
3628  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3629  if (! getVShiftImm(Op, ElementBits, Cnt))
3630    return false;
3631  if (isIntrinsic)
3632    Cnt = -Cnt;
3633  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
3634}
3635
3636/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
3637static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
3638  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3639  switch (IntNo) {
3640  default:
3641    // Don't do anything for most intrinsics.
3642    break;
3643
3644  // Vector shifts: check for immediate versions and lower them.
3645  // Note: This is done during DAG combining instead of DAG legalizing because
3646  // the build_vectors for 64-bit vector element shift counts are generally
3647  // not legal, and it is hard to see their values after they get legalized to
3648  // loads from a constant pool.
3649  case Intrinsic::arm_neon_vshifts:
3650  case Intrinsic::arm_neon_vshiftu:
3651  case Intrinsic::arm_neon_vshiftls:
3652  case Intrinsic::arm_neon_vshiftlu:
3653  case Intrinsic::arm_neon_vshiftn:
3654  case Intrinsic::arm_neon_vrshifts:
3655  case Intrinsic::arm_neon_vrshiftu:
3656  case Intrinsic::arm_neon_vrshiftn:
3657  case Intrinsic::arm_neon_vqshifts:
3658  case Intrinsic::arm_neon_vqshiftu:
3659  case Intrinsic::arm_neon_vqshiftsu:
3660  case Intrinsic::arm_neon_vqshiftns:
3661  case Intrinsic::arm_neon_vqshiftnu:
3662  case Intrinsic::arm_neon_vqshiftnsu:
3663  case Intrinsic::arm_neon_vqrshiftns:
3664  case Intrinsic::arm_neon_vqrshiftnu:
3665  case Intrinsic::arm_neon_vqrshiftnsu: {
3666    EVT VT = N->getOperand(1).getValueType();
3667    int64_t Cnt;
3668    unsigned VShiftOpc = 0;
3669
3670    switch (IntNo) {
3671    case Intrinsic::arm_neon_vshifts:
3672    case Intrinsic::arm_neon_vshiftu:
3673      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
3674        VShiftOpc = ARMISD::VSHL;
3675        break;
3676      }
3677      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
3678        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
3679                     ARMISD::VSHRs : ARMISD::VSHRu);
3680        break;
3681      }
3682      return SDValue();
3683
3684    case Intrinsic::arm_neon_vshiftls:
3685    case Intrinsic::arm_neon_vshiftlu:
3686      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
3687        break;
3688      llvm_unreachable("invalid shift count for vshll intrinsic");
3689
3690    case Intrinsic::arm_neon_vrshifts:
3691    case Intrinsic::arm_neon_vrshiftu:
3692      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
3693        break;
3694      return SDValue();
3695
3696    case Intrinsic::arm_neon_vqshifts:
3697    case Intrinsic::arm_neon_vqshiftu:
3698      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
3699        break;
3700      return SDValue();
3701
3702    case Intrinsic::arm_neon_vqshiftsu:
3703      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
3704        break;
3705      llvm_unreachable("invalid shift count for vqshlu intrinsic");
3706
3707    case Intrinsic::arm_neon_vshiftn:
3708    case Intrinsic::arm_neon_vrshiftn:
3709    case Intrinsic::arm_neon_vqshiftns:
3710    case Intrinsic::arm_neon_vqshiftnu:
3711    case Intrinsic::arm_neon_vqshiftnsu:
3712    case Intrinsic::arm_neon_vqrshiftns:
3713    case Intrinsic::arm_neon_vqrshiftnu:
3714    case Intrinsic::arm_neon_vqrshiftnsu:
3715      // Narrowing shifts require an immediate right shift.
3716      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
3717        break;
3718      llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");
3719
3720    default:
3721      llvm_unreachable("unhandled vector shift");
3722    }
3723
3724    switch (IntNo) {
3725    case Intrinsic::arm_neon_vshifts:
3726    case Intrinsic::arm_neon_vshiftu:
3727      // Opcode already set above.
3728      break;
3729    case Intrinsic::arm_neon_vshiftls:
3730    case Intrinsic::arm_neon_vshiftlu:
3731      if (Cnt == VT.getVectorElementType().getSizeInBits())
3732        VShiftOpc = ARMISD::VSHLLi;
3733      else
3734        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
3735                     ARMISD::VSHLLs : ARMISD::VSHLLu);
3736      break;
3737    case Intrinsic::arm_neon_vshiftn:
3738      VShiftOpc = ARMISD::VSHRN; break;
3739    case Intrinsic::arm_neon_vrshifts:
3740      VShiftOpc = ARMISD::VRSHRs; break;
3741    case Intrinsic::arm_neon_vrshiftu:
3742      VShiftOpc = ARMISD::VRSHRu; break;
3743    case Intrinsic::arm_neon_vrshiftn:
3744      VShiftOpc = ARMISD::VRSHRN; break;
3745    case Intrinsic::arm_neon_vqshifts:
3746      VShiftOpc = ARMISD::VQSHLs; break;
3747    case Intrinsic::arm_neon_vqshiftu:
3748      VShiftOpc = ARMISD::VQSHLu; break;
3749    case Intrinsic::arm_neon_vqshiftsu:
3750      VShiftOpc = ARMISD::VQSHLsu; break;
3751    case Intrinsic::arm_neon_vqshiftns:
3752      VShiftOpc = ARMISD::VQSHRNs; break;
3753    case Intrinsic::arm_neon_vqshiftnu:
3754      VShiftOpc = ARMISD::VQSHRNu; break;
3755    case Intrinsic::arm_neon_vqshiftnsu:
3756      VShiftOpc = ARMISD::VQSHRNsu; break;
3757    case Intrinsic::arm_neon_vqrshiftns:
3758      VShiftOpc = ARMISD::VQRSHRNs; break;
3759    case Intrinsic::arm_neon_vqrshiftnu:
3760      VShiftOpc = ARMISD::VQRSHRNu; break;
3761    case Intrinsic::arm_neon_vqrshiftnsu:
3762      VShiftOpc = ARMISD::VQRSHRNsu; break;
3763    }
3764
3765    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
3766                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
3767  }
3768
3769  case Intrinsic::arm_neon_vshiftins: {
3770    EVT VT = N->getOperand(1).getValueType();
3771    int64_t Cnt;
3772    unsigned VShiftOpc = 0;
3773
3774    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
3775      VShiftOpc = ARMISD::VSLI;
3776    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
3777      VShiftOpc = ARMISD::VSRI;
3778    else {
3779      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
3780    }
3781
3782    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
3783                       N->getOperand(1), N->getOperand(2),
3784                       DAG.getConstant(Cnt, MVT::i32));
3785  }
3786
3787  case Intrinsic::arm_neon_vqrshifts:
3788  case Intrinsic::arm_neon_vqrshiftu:
3789    // No immediate versions of these to check for.
3790    break;
3791  }
3792
3793  return SDValue();
3794}
3795
3796/// PerformShiftCombine - Checks for immediate versions of vector shifts and
3797/// lowers them.  As with the vector shift intrinsics, this is done during DAG
3798/// combining instead of DAG legalizing because the build_vectors for 64-bit
3799/// vector element shift counts are generally not legal, and it is hard to see
3800/// their values after they get legalized to loads from a constant pool.
3801static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
3802                                   const ARMSubtarget *ST) {
3803  EVT VT = N->getValueType(0);
3804
3805  // Nothing to be done for scalar shifts.
3806  if (! VT.isVector())
3807    return SDValue();
3808
3809  assert(ST->hasNEON() && "unexpected vector shift");
3810  int64_t Cnt;
3811
3812  switch (N->getOpcode()) {
3813  default: llvm_unreachable("unexpected shift opcode");
3814
3815  case ISD::SHL:
3816    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
3817      return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
3818                         DAG.getConstant(Cnt, MVT::i32));
3819    break;
3820
3821  case ISD::SRA:
3822  case ISD::SRL:
3823    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
3824      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
3825                            ARMISD::VSHRs : ARMISD::VSHRu);
3826      return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
3827                         DAG.getConstant(Cnt, MVT::i32));
3828    }
3829  }
3830  return SDValue();
3831}
3832
3833/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
3834/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
3835static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
3836                                    const ARMSubtarget *ST) {
3837  SDValue N0 = N->getOperand(0);
3838
3839  // Check for sign- and zero-extensions of vector extract operations of 8-
3840  // and 16-bit vector elements.  NEON supports these directly.  They are
3841  // handled during DAG combining because type legalization will promote them
3842  // to 32-bit types and it is messy to recognize the operations after that.
3843  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
3844    SDValue Vec = N0.getOperand(0);
3845    SDValue Lane = N0.getOperand(1);
3846    EVT VT = N->getValueType(0);
3847    EVT EltVT = N0.getValueType();
3848    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3849
3850    if (VT == MVT::i32 &&
3851        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
3852        TLI.isTypeLegal(Vec.getValueType())) {
3853
3854      unsigned Opc = 0;
3855      switch (N->getOpcode()) {
3856      default: llvm_unreachable("unexpected opcode");
3857      case ISD::SIGN_EXTEND:
3858        Opc = ARMISD::VGETLANEs;
3859        break;
3860      case ISD::ZERO_EXTEND:
3861      case ISD::ANY_EXTEND:
3862        Opc = ARMISD::VGETLANEu;
3863        break;
3864      }
3865      return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
3866    }
3867  }
3868
3869  return SDValue();
3870}
3871
3872/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
3873/// to match f32 max/min patterns to use NEON vmax/vmin instructions.
3874static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
3875                                       const ARMSubtarget *ST) {
3876  // If the target supports NEON, try to use vmax/vmin instructions for f32
3877  // selects like "x < y ? x : y".  Unless the FiniteOnlyFPMath option is set,
3878  // be careful about NaNs:  NEON's vmax/vmin return NaN if either operand is
3879  // a NaN; only do the transformation when it matches that behavior.
3880
3881  // For now only do this when using NEON for FP operations; if using VFP, it
3882  // is not obvious that the benefit outweighs the cost of switching to the
3883  // NEON pipeline.
3884  if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
3885      N->getValueType(0) != MVT::f32)
3886    return SDValue();
3887
3888  SDValue CondLHS = N->getOperand(0);
3889  SDValue CondRHS = N->getOperand(1);
3890  SDValue LHS = N->getOperand(2);
3891  SDValue RHS = N->getOperand(3);
3892  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
3893
3894  unsigned Opcode = 0;
3895  bool IsReversed;
3896  if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
3897    IsReversed = false; // x CC y ? x : y
3898  } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
3899    IsReversed = true;  // x CC y ? y : x
3900  } else {
3901    return SDValue();
3902  }
3903
3904  bool IsUnordered;
3905  switch (CC) {
3906  default: break;
3907  case ISD::SETOLT:
3908  case ISD::SETOLE:
3909  case ISD::SETLT:
3910  case ISD::SETLE:
3911  case ISD::SETULT:
3912  case ISD::SETULE:
3913    // If LHS is NaN, an ordered comparison will be false and the result will
3914    // be the RHS, but vmin(NaN, RHS) = NaN.  Avoid this by checking that LHS
3915    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
3916    IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
3917    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
3918      break;
3919    // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
3920    // will return -0, so vmin can only be used for unsafe math or if one of
3921    // the operands is known to be nonzero.
3922    if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
3923        !UnsafeFPMath &&
3924        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
3925      break;
3926    Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
3927    break;
3928
3929  case ISD::SETOGT:
3930  case ISD::SETOGE:
3931  case ISD::SETGT:
3932  case ISD::SETGE:
3933  case ISD::SETUGT:
3934  case ISD::SETUGE:
3935    // If LHS is NaN, an ordered comparison will be false and the result will
3936    // be the RHS, but vmax(NaN, RHS) = NaN.  Avoid this by checking that LHS
3937    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
3938    IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
3939    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
3940      break;
3941    // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
3942    // will return +0, so vmax can only be used for unsafe math or if one of
3943    // the operands is known to be nonzero.
3944    if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
3945        !UnsafeFPMath &&
3946        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
3947      break;
3948    Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
3949    break;
3950  }
3951
3952  if (!Opcode)
3953    return SDValue();
3954  return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
3955}
3956
3957SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
3958                                             DAGCombinerInfo &DCI) const {
3959  switch (N->getOpcode()) {
3960  default: break;
3961  case ISD::ADD:        return PerformADDCombine(N, DCI);
3962  case ISD::SUB:        return PerformSUBCombine(N, DCI);
3963  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
3964  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
3965  case ISD::SHL:
3966  case ISD::SRA:
3967  case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
3968  case ISD::SIGN_EXTEND:
3969  case ISD::ZERO_EXTEND:
3970  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
3971  case ISD::SELECT_CC:  return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
3972  }
3973  return SDValue();
3974}
3975
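/// allowsUnalignedMemoryAccesses - Report whether unaligned scalar accesses
/// are acceptable so that generic lowering (e.g. of memcpy) may emit, say, an
/// unaligned i32 load instead of four byte loads. Conservatively enabled only
/// for Darwin targets with v6 or later here.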
3976bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
3977  if (!Subtarget->hasV6Ops())
3978    // Pre-v6 does not support unaligned mem access.
3979    return false;
3980  else {
3981    // v6+ may or may not support unaligned mem access depending on the system
3982    // configuration.
3983    // FIXME: This is pretty conservative. Should we provide cmdline option to
3984    // control the behaviour?
3985    if (!Subtarget->isTargetDarwin())
3986      return false;
3987  }
3988
3989  switch (VT.getSimpleVT().SimpleTy) {
3990  default:
3991    return false;
3992  case MVT::i8:
3993  case MVT::i16:
3994  case MVT::i32:
3995    return true;
3996  // FIXME: VLD1 etc with standard alignment is legal.
3997  }
3998}
3999
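/// isLegalT1AddressImmediate - Thumb1 loads and stores take an unsigned 5-bit
/// immediate offset that is implicitly scaled by the access size. For example
/// (illustrative), "ldr r0, [r1, #124]" is legal (124 == 31 * 4), while
/// "ldr r0, [r1, #128]" and any negative offset are not.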
4000static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
4001  if (V < 0)
4002    return false;
4003
4004  unsigned Scale = 1;
4005  switch (VT.getSimpleVT().SimpleTy) {
4006  default: return false;
4007  case MVT::i1:
4008  case MVT::i8:
4009    // Scale == 1;
4010    break;
4011  case MVT::i16:
4012    // Scale == 2;
4013    Scale = 2;
4014    break;
4015  case MVT::i32:
4016    // Scale == 4;
4017    Scale = 4;
4018    break;
4019  }
4020
4021  if ((V & (Scale - 1)) != 0)
4022    return false;
4023  V /= Scale;
4024  return V == (V & ((1LL << 5) - 1));
4025}
4026
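/// isLegalT2AddressImmediate - Thumb2 integer loads and stores accept either a
/// positive 12-bit offset or a negative 8-bit offset; VFP accesses use an
/// 8-bit offset scaled by 4. For example (illustrative), "ldr.w r0, [r1, #4095]"
/// and "ldr r0, [r1, #-255]" are legal, but "ldr r0, [r1, #-256]" is not.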
4027static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
4028                                      const ARMSubtarget *Subtarget) {
4029  bool isNeg = false;
4030  if (V < 0) {
4031    isNeg = true;
4032    V = - V;
4033  }
4034
4035  switch (VT.getSimpleVT().SimpleTy) {
4036  default: return false;
4037  case MVT::i1:
4038  case MVT::i8:
4039  case MVT::i16:
4040  case MVT::i32:
4041    // + imm12 or - imm8
4042    if (isNeg)
4043      return V == (V & ((1LL << 8) - 1));
4044    return V == (V & ((1LL << 12) - 1));
4045  case MVT::f32:
4046  case MVT::f64:
4047    // Same as ARM mode. FIXME: NEON?
4048    if (!Subtarget->hasVFP2())
4049      return false;
4050    if ((V & 3) != 0)
4051      return false;
4052    V >>= 2;
4053    return V == (V & ((1LL << 8) - 1));
4054  }
4055}
4056
4057/// isLegalAddressImmediate - Return true if the integer value can be used
4058/// as the offset of the target addressing mode for load / store of the
4059/// given type.
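/// For example (illustrative): in ARM mode an i32 access may use offsets in
/// [-4095, 4095], an i16 access is limited to [-255, 255], and an f32/f64
/// access with VFP uses a multiple of 4 in [-1020, 1020].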
4060static bool isLegalAddressImmediate(int64_t V, EVT VT,
4061                                    const ARMSubtarget *Subtarget) {
4062  if (V == 0)
4063    return true;
4064
4065  if (!VT.isSimple())
4066    return false;
4067
4068  if (Subtarget->isThumb1Only())
4069    return isLegalT1AddressImmediate(V, VT);
4070  else if (Subtarget->isThumb2())
4071    return isLegalT2AddressImmediate(V, VT, Subtarget);
4072
4073  // ARM mode.
4074  if (V < 0)
4075    V = - V;
4076  switch (VT.getSimpleVT().SimpleTy) {
4077  default: return false;
4078  case MVT::i1:
4079  case MVT::i8:
4080  case MVT::i32:
4081    // +- imm12
4082    return V == (V & ((1LL << 12) - 1));
4083  case MVT::i16:
4084    // +- imm8
4085    return V == (V & ((1LL << 8) - 1));
4086  case MVT::f32:
4087  case MVT::f64:
4088    if (!Subtarget->hasVFP2()) // FIXME: NEON?
4089      return false;
4090    if ((V & 3) != 0)
4091      return false;
4092    V >>= 2;
4093    return V == (V & ((1LL << 8) - 1));
4094  }
4095}
4096
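/// isLegalT2ScaledAddressingMode - Check whether a scaled-register addressing
/// mode (reg + reg << imm) is legal in Thumb2 for the given type. For example
/// (illustrative), "ldr r0, [r1, r2, lsl #2]" corresponds to Scale == 4.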
4097bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
4098                                                      EVT VT) const {
4099  int Scale = AM.Scale;
4100  if (Scale < 0)
4101    return false;
4102
4103  switch (VT.getSimpleVT().SimpleTy) {
4104  default: return false;
4105  case MVT::i1:
4106  case MVT::i8:
4107  case MVT::i16:
4108  case MVT::i32:
4109    if (Scale == 1)
4110      return true;
4111    // r + r << imm
4112    Scale = Scale & ~1;
4113    return Scale == 2 || Scale == 4 || Scale == 8;
4114  case MVT::i64:
4115    // r + r
4116    if (((unsigned)AM.HasBaseReg + Scale) <= 2)
4117      return true;
4118    return false;
4119  case MVT::isVoid:
4120    // Note, we allow "void" uses (basically, uses that aren't loads or
4121    // stores), because ARM allows folding a scale into many arithmetic
4122    // operations.  This should be made more precise and revisited later.
4123
4124    // Allow r << imm, but the imm has to be a multiple of two.
4125    if (Scale & 1) return false;
4126    return isPowerOf2_32(Scale);
4127  }
4128}
4129
4130/// isLegalAddressingMode - Return true if the addressing mode represented
4131/// by AM is legal for this target, for a load/store of the specified type.
4132bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
4133                                              const Type *Ty) const {
4134  EVT VT = getValueType(Ty, true);
4135  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
4136    return false;
4137
4138  // Can never fold addr of global into load/store.
4139  if (AM.BaseGV)
4140    return false;
4141
4142  switch (AM.Scale) {
4143  case 0:  // no scale reg, must be "r+i" or "r", or "i".
4144    break;
4145  case 1:
4146    if (Subtarget->isThumb1Only())
4147      return false;
4148    // FALL THROUGH.
4149  default:
4150    // ARM doesn't support any R+R*scale+imm addr modes.
4151    if (AM.BaseOffs)
4152      return false;
4153
4154    if (!VT.isSimple())
4155      return false;
4156
4157    if (Subtarget->isThumb2())
4158      return isLegalT2ScaledAddressingMode(AM, VT);
4159
4160    int Scale = AM.Scale;
4161    switch (VT.getSimpleVT().SimpleTy) {
4162    default: return false;
4163    case MVT::i1:
4164    case MVT::i8:
4165    case MVT::i32:
4166      if (Scale < 0) Scale = -Scale;
4167      if (Scale == 1)
4168        return true;
4169      // r + r << imm
4170      return isPowerOf2_32(Scale & ~1);
4171    case MVT::i16:
4172    case MVT::i64:
4173      // r + r
4174      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
4175        return true;
4176      return false;
4177
4178    case MVT::isVoid:
4179      // Note, we allow "void" uses (basically, uses that aren't loads or
4180      // stores), because ARM allows folding a scale into many arithmetic
4181      // operations.  This should be made more precise and revisited later.
4182
4183      // Allow r << imm, but the imm has to be a multiple of two.
4184      if (Scale & 1) return false;
4185      return isPowerOf2_32(Scale);
4186    }
4187    break;
4188  }
4189  return true;
4190}
4191
4192/// isLegalICmpImmediate - Return true if the specified immediate is legal
4193/// icmp immediate, that is the target has icmp instructions which can compare
4194/// a register against the immediate without having to materialize the
4195/// immediate into a register.
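/// For example (illustrative): in ARM mode 0xFF00 is legal (an 8-bit value
/// rotated by an even amount), Thumb1 only accepts 0..255, and Thumb2 also
/// accepts its modified-immediate patterns such as 0x00FF00FF.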
4196bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
4197  if (!Subtarget->isThumb())
4198    return ARM_AM::getSOImmVal(Imm) != -1;
4199  if (Subtarget->isThumb2())
4200    return ARM_AM::getT2SOImmVal(Imm) != -1;
4201  return Imm >= 0 && Imm <= 255;
4202}
4203
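/// getARMIndexedAddressParts - Split an ADD/SUB pointer computation into a
/// base and an offset usable for ARM pre/post-indexed loads and stores.
/// Illustrative sketch of what the caller can then form:
///   "ldrh r0, [r1, #-8]!"        (addressing mode 3, 8-bit immediate)
///   "ldr  r0, [r1], r2, lsl #2"  (addressing mode 2, shifted register)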
4204static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
4205                                      bool isSEXTLoad, SDValue &Base,
4206                                      SDValue &Offset, bool &isInc,
4207                                      SelectionDAG &DAG) {
4208  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
4209    return false;
4210
4211  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
4212    // AddressingMode 3
4213    Base = Ptr->getOperand(0);
4214    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
4215      int RHSC = (int)RHS->getZExtValue();
4216      if (RHSC < 0 && RHSC > -256) {
4217        assert(Ptr->getOpcode() == ISD::ADD);
4218        isInc = false;
4219        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
4220        return true;
4221      }
4222    }
4223    isInc = (Ptr->getOpcode() == ISD::ADD);
4224    Offset = Ptr->getOperand(1);
4225    return true;
4226  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
4227    // AddressingMode 2
4228    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
4229      int RHSC = (int)RHS->getZExtValue();
4230      if (RHSC < 0 && RHSC > -0x1000) {
4231        assert(Ptr->getOpcode() == ISD::ADD);
4232        isInc = false;
4233        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
4234        Base = Ptr->getOperand(0);
4235        return true;
4236      }
4237    }
4238
4239    if (Ptr->getOpcode() == ISD::ADD) {
4240      isInc = true;
4241      ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
4242      if (ShOpcVal != ARM_AM::no_shift) {
4243        Base = Ptr->getOperand(1);
4244        Offset = Ptr->getOperand(0);
4245      } else {
4246        Base = Ptr->getOperand(0);
4247        Offset = Ptr->getOperand(1);
4248      }
4249      return true;
4250    }
4251
4252    isInc = (Ptr->getOpcode() == ISD::ADD);
4253    Base = Ptr->getOperand(0);
4254    Offset = Ptr->getOperand(1);
4255    return true;
4256  }
4257
4258  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
4259  return false;
4260}
4261
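/// getT2IndexedAddressParts - Thumb2 pre/post-indexed loads and stores take an
/// 8-bit offset. For example (illustrative), "ldr r0, [r1, #255]!" is legal,
/// while an offset of 256 is not and falls back to a separate add.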
4262static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
4263                                     bool isSEXTLoad, SDValue &Base,
4264                                     SDValue &Offset, bool &isInc,
4265                                     SelectionDAG &DAG) {
4266  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
4267    return false;
4268
4269  Base = Ptr->getOperand(0);
4270  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
4271    int RHSC = (int)RHS->getZExtValue();
4272    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
4273      assert(Ptr->getOpcode() == ISD::ADD);
4274      isInc = false;
4275      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
4276      return true;
4277    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
4278      isInc = Ptr->getOpcode() == ISD::ADD;
4279      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
4280      return true;
4281    }
4282  }
4283
4284  return false;
4285}
4286
4287/// getPreIndexedAddressParts - Returns true and sets the base pointer,
4288/// offset, and addressing mode by reference if the node's address can be
4289/// legally represented as a pre-indexed load / store address.
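/// For example (illustrative), a load from "p + 4" whose incremented pointer
/// is also used later can become the pre-indexed "ldr r0, [r1, #4]!", which
/// updates the base register as part of the load.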
4290bool
4291ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
4292                                             SDValue &Offset,
4293                                             ISD::MemIndexedMode &AM,
4294                                             SelectionDAG &DAG) const {
4295  if (Subtarget->isThumb1Only())
4296    return false;
4297
4298  EVT VT;
4299  SDValue Ptr;
4300  bool isSEXTLoad = false;
4301  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
4302    Ptr = LD->getBasePtr();
4303    VT  = LD->getMemoryVT();
4304    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
4305  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
4306    Ptr = ST->getBasePtr();
4307    VT  = ST->getMemoryVT();
4308  } else
4309    return false;
4310
4311  bool isInc;
4312  bool isLegal = false;
4313  if (Subtarget->isThumb2())
4314    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
4315                                       Offset, isInc, DAG);
4316  else
4317    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
4318                                        Offset, isInc, DAG);
4319  if (!isLegal)
4320    return false;
4321
4322  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
4323  return true;
4324}
4325
4326/// getPostIndexedAddressParts - Returns true and sets the base pointer,
4327/// offset, and addressing mode by reference if this node can be combined
4328/// with a load / store to form a post-indexed load / store.
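/// For example (illustrative), a load from "p" followed by "p += 4" can become
/// the post-indexed "ldr r0, [r1], #4".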
4329bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
4330                                                   SDValue &Base,
4331                                                   SDValue &Offset,
4332                                                   ISD::MemIndexedMode &AM,
4333                                                   SelectionDAG &DAG) const {
4334  if (Subtarget->isThumb1Only())
4335    return false;
4336
4337  EVT VT;
4338  SDValue Ptr;
4339  bool isSEXTLoad = false;
4340  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
4341    VT  = LD->getMemoryVT();
4342    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
4343  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
4344    VT  = ST->getMemoryVT();
4345  } else
4346    return false;
4347
4348  bool isInc;
4349  bool isLegal = false;
4350  if (Subtarget->isThumb2())
4351    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
4352                                        isInc, DAG);
4353  else
4354    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
4355                                        isInc, DAG);
4356  if (!isLegal)
4357    return false;
4358
4359  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
4360  return true;
4361}
4362
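/// computeMaskedBitsForTargetNode - Compute known-zero/known-one bits for
/// ARM-specific nodes. For ARMISD::CMOV a bit is known only if it is known the
/// same way in both possible results; e.g. (illustrative) if both inputs have
/// their top 24 bits known zero, so does the CMOV.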
4363void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
4364                                                       const APInt &Mask,
4365                                                       APInt &KnownZero,
4366                                                       APInt &KnownOne,
4367                                                       const SelectionDAG &DAG,
4368                                                       unsigned Depth) const {
4369  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
4370  switch (Op.getOpcode()) {
4371  default: break;
4372  case ARMISD::CMOV: {
4373    // Bits are known zero/one if known on the LHS and RHS.
4374    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
4375    if (KnownZero == 0 && KnownOne == 0) return;
4376
4377    APInt KnownZeroRHS, KnownOneRHS;
4378    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
4379                          KnownZeroRHS, KnownOneRHS, Depth+1);
4380    KnownZero &= KnownZeroRHS;
4381    KnownOne  &= KnownOneRHS;
4382    return;
4383  }
4384  }
4385}
4386
4387//===----------------------------------------------------------------------===//
4388//                           ARM Inline Assembly Support
4389//===----------------------------------------------------------------------===//
4390
4391/// getConstraintType - Given a constraint letter, return the type of
4392/// constraint it is for this target.
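/// For example (illustrative), in GCC-style inline assembly "l" requests a low
/// GPR (r0-r7 in Thumb) and "w" requests a VFP/NEON register, so both are
/// register-class constraints rather than plain register or memory constraints.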
4393ARMTargetLowering::ConstraintType
4394ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
4395  if (Constraint.size() == 1) {
4396    switch (Constraint[0]) {
4397    default:  break;
4398    case 'l': return C_RegisterClass;
4399    case 'w': return C_RegisterClass;
4400    }
4401  }
4402  return TargetLowering::getConstraintType(Constraint);
4403}
4404
4405std::pair<unsigned, const TargetRegisterClass*>
4406ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
4407                                                EVT VT) const {
4408  if (Constraint.size() == 1) {
4409    // GCC ARM Constraint Letters
4410    switch (Constraint[0]) {
4411    case 'l':
4412      if (Subtarget->isThumb())
4413        return std::make_pair(0U, ARM::tGPRRegisterClass);
4414      else
4415        return std::make_pair(0U, ARM::GPRRegisterClass);
4416    case 'r':
4417      return std::make_pair(0U, ARM::GPRRegisterClass);
4418    case 'w':
4419      if (VT == MVT::f32)
4420        return std::make_pair(0U, ARM::SPRRegisterClass);
4421      if (VT.getSizeInBits() == 64)
4422        return std::make_pair(0U, ARM::DPRRegisterClass);
4423      if (VT.getSizeInBits() == 128)
4424        return std::make_pair(0U, ARM::QPRRegisterClass);
4425      break;
4426    }
4427  }
4428  if (StringRef("{cc}").equals_lower(Constraint))
4429    return std::make_pair(0U, ARM::CCRRegisterClass);
4430
4431  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
4432}
4433
4434std::vector<unsigned> ARMTargetLowering::
4435getRegClassForInlineAsmConstraint(const std::string &Constraint,
4436                                  EVT VT) const {
4437  if (Constraint.size() != 1)
4438    return std::vector<unsigned>();
4439
4440  switch (Constraint[0]) {      // GCC ARM Constraint Letters
4441  default: break;
4442  case 'l':
4443    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
4444                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
4445                                 0);
4446  case 'r':
4447    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
4448                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
4449                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
4450                                 ARM::R12, ARM::LR, 0);
4451  case 'w':
4452    if (VT == MVT::f32)
4453      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
4454                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
4455                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
4456                                   ARM::S12,ARM::S13,ARM::S14,ARM::S15,
4457                                   ARM::S16,ARM::S17,ARM::S18,ARM::S19,
4458                                   ARM::S20,ARM::S21,ARM::S22,ARM::S23,
4459                                   ARM::S24,ARM::S25,ARM::S26,ARM::S27,
4460                                   ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
4461    if (VT.getSizeInBits() == 64)
4462      return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
4463                                   ARM::D4, ARM::D5, ARM::D6, ARM::D7,
4464                                   ARM::D8, ARM::D9, ARM::D10,ARM::D11,
4465                                   ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
4466    if (VT.getSizeInBits() == 128)
4467      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
4468                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
4469    break;
4470  }
4471
4472  return std::vector<unsigned>();
4473}
4474
4475/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4476/// vector.  If it is invalid, don't add anything to Ops.
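/// The letters handled below are GCC's ARM immediate constraints. For example
/// (illustrative), asm("add %0, %1, %2" : "=r"(d) : "r"(s), "I"(255)) requires
/// 255 to be encodable as a data-processing immediate, which this hook checks
/// before emitting the target constant.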
4477void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4478                                                     char Constraint,
4479                                                     bool hasMemory,
4480                                                     std::vector<SDValue>&Ops,
4481                                                     SelectionDAG &DAG) const {
4482  SDValue Result(0, 0);
4483
4484  switch (Constraint) {
4485  default: break;
4486  case 'I': case 'J': case 'K': case 'L':
4487  case 'M': case 'N': case 'O':
4488    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4489    if (!C)
4490      return;
4491
4492    int64_t CVal64 = C->getSExtValue();
4493    int CVal = (int) CVal64;
4494    // None of these constraints allow values larger than 32 bits.  Check
4495    // that the value fits in an int.
4496    if (CVal != CVal64)
4497      return;
4498
4499    switch (Constraint) {
4500      case 'I':
4501        if (Subtarget->isThumb1Only()) {
4502          // This must be a constant between 0 and 255, for ADD
4503          // immediates.
4504          if (CVal >= 0 && CVal <= 255)
4505            break;
4506        } else if (Subtarget->isThumb2()) {
4507          // A constant that can be used as an immediate value in a
4508          // data-processing instruction.
4509          if (ARM_AM::getT2SOImmVal(CVal) != -1)
4510            break;
4511        } else {
4512          // A constant that can be used as an immediate value in a
4513          // data-processing instruction.
4514          if (ARM_AM::getSOImmVal(CVal) != -1)
4515            break;
4516        }
4517        return;
4518
4519      case 'J':
4520        if (Subtarget->isThumb()) {  // FIXME thumb2
4521          // This must be a constant between -255 and -1, for negated ADD
4522          // immediates. This can be used in GCC with an "n" modifier that
4523          // prints the negated value, for use with SUB instructions. It is
4524          // not useful otherwise but is implemented for compatibility.
4525          if (CVal >= -255 && CVal <= -1)
4526            break;
4527        } else {
4528          // This must be a constant between -4095 and 4095. It is not clear
4529          // what this constraint is intended for. Implemented for
4530          // compatibility with GCC.
4531          if (CVal >= -4095 && CVal <= 4095)
4532            break;
4533        }
4534        return;
4535
4536      case 'K':
4537        if (Subtarget->isThumb1Only()) {
4538          // A 32-bit value where only one byte has a nonzero value. Exclude
4539          // zero to match GCC. This constraint is used by GCC internally for
4540          // constants that can be loaded with a move/shift combination.
4541          // It is not useful otherwise but is implemented for compatibility.
4542          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
4543            break;
4544        } else if (Subtarget->isThumb2()) {
4545          // A constant whose bitwise inverse can be used as an immediate
4546          // value in a data-processing instruction. This can be used in GCC
4547          // with a "B" modifier that prints the inverted value, for use with
4548          // BIC and MVN instructions. It is not useful otherwise but is
4549          // implemented for compatibility.
4550          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
4551            break;
4552        } else {
4553          // A constant whose bitwise inverse can be used as an immediate
4554          // value in a data-processing instruction. This can be used in GCC
4555          // with a "B" modifier that prints the inverted value, for use with
4556          // BIC and MVN instructions. It is not useful otherwise but is
4557          // implemented for compatibility.
4558          if (ARM_AM::getSOImmVal(~CVal) != -1)
4559            break;
4560        }
4561        return;
4562
4563      case 'L':
4564        if (Subtarget->isThumb1Only()) {
4565          // This must be a constant between -7 and 7,
4566          // for 3-operand ADD/SUB immediate instructions.
4567          if (CVal >= -7 && CVal <= 7)
4568            break;
4569        } else if (Subtarget->isThumb2()) {
4570          // A constant whose negation can be used as an immediate value in a
4571          // data-processing instruction. This can be used in GCC with an "n"
4572          // modifier that prints the negated value, for use with SUB
4573          // instructions. It is not useful otherwise but is implemented for
4574          // compatibility.
4575          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
4576            break;
4577        } else {
4578          // A constant whose negation can be used as an immediate value in a
4579          // data-processing instruction. This can be used in GCC with an "n"
4580          // modifier that prints the negated value, for use with SUB
4581          // instructions. It is not useful otherwise but is implemented for
4582          // compatibility.
4583          if (ARM_AM::getSOImmVal(-CVal) != -1)
4584            break;
4585        }
4586        return;
4587
4588      case 'M':
4589        if (Subtarget->isThumb()) { // FIXME thumb2
4590          // This must be a multiple of 4 between 0 and 1020, for
4591          // ADD sp + immediate.
4592          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
4593            break;
4594        } else {
4595          // A power of two or a constant between 0 and 32.  This is used in
4596          // GCC for the shift amount on shifted register operands, but it is
4597          // useful in general for any shift amounts.
4598          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
4599            break;
4600        }
4601        return;
4602
4603      case 'N':
4604        if (Subtarget->isThumb()) {  // FIXME thumb2
4605          // This must be a constant between 0 and 31, for shift amounts.
4606          if (CVal >= 0 && CVal <= 31)
4607            break;
4608        }
4609        return;
4610
4611      case 'O':
4612        if (Subtarget->isThumb()) {  // FIXME thumb2
4613          // This must be a multiple of 4 between -508 and 508, for
4614          // ADD/SUB sp = sp + immediate.
4615          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
4616            break;
4617        }
4618        return;
4619    }
4620    Result = DAG.getTargetConstant(CVal, Op.getValueType());
4621    break;
4622  }
4623
4624  if (Result.getNode()) {
4625    Ops.push_back(Result);
4626    return;
4627  }
4628  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
4629                                                      Ops, DAG);
4630}
4631
4632bool
4633ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4634  // The ARM target isn't yet aware of offsets.
4635  return false;
4636}
4637
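/// getVFPf32Imm - Encode an f32 constant for VFPv3's immediate form, which
/// represents values of the form (+/-)(16+m)/16 * 2^exp with a 4-bit mantissa
/// m and a 3-bit exponent in [-3, 4]. Worked example (illustrative): 1.0f has
/// sign 0, exponent 0 and mantissa bits 0000, so the exponent field is
/// ((0+3) & 7) ^ 4 == 7 and the returned encoding is 0x70.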
4638int ARM::getVFPf32Imm(const APFloat &FPImm) {
4639  APInt Imm = FPImm.bitcastToAPInt();
4640  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
4641  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
4642  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits
4643
4644  // We can handle 4 bits of mantissa.
4645  // mantissa = (16+UInt(e:f:g:h))/16.
4646  if (Mantissa & 0x7ffff)
4647    return -1;
4648  Mantissa >>= 19;
4649  if ((Mantissa & 0xf) != Mantissa)
4650    return -1;
4651
4652  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
4653  if (Exp < -3 || Exp > 4)
4654    return -1;
4655  Exp = ((Exp+3) & 0x7) ^ 4;
4656
4657  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
4658}
4659
4660int ARM::getVFPf64Imm(const APFloat &FPImm) {
4661  APInt Imm = FPImm.bitcastToAPInt();
4662  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
4663  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;   // -1022 to 1023
4664  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;
4665
4666  // We can handle 4 bits of mantissa.
4667  // mantissa = (16+UInt(e:f:g:h))/16.
4668  if (Mantissa & 0xffffffffffffLL)
4669    return -1;
4670  Mantissa >>= 48;
4671  if ((Mantissa & 0xf) != Mantissa)
4672    return -1;
4673
4674  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
4675  if (Exp < -3 || Exp > 4)
4676    return -1;
4677  Exp = ((Exp+3) & 0x7) ^ 4;
4678
4679  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
4680}
4681
4682/// isFPImmLegal - Returns true if the target can instruction select the
4683/// specified FP immediate natively. If false, the legalizer will
4684/// materialize the FP immediate as a load from a constant pool.
4685bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
4686  if (!Subtarget->hasVFP3())
4687    return false;
4688  if (VT == MVT::f32)
4689    return ARM::getVFPf32Imm(Imm) != -1;
4690  if (VT == MVT::f64)
4691    return ARM::getVFPf64Imm(Imm) != -1;
4692  return false;
4693}
4694