ARMISelLowering.cpp revision 3cb6277d7f2dd5b0d47a327b4c5d1fa8188e28ae
1//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that ARM uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ARM.h"
16#include "ARMAddressingModes.h"
17#include "ARMConstantPoolValue.h"
18#include "ARMISelLowering.h"
19#include "ARMMachineFunctionInfo.h"
20#include "ARMPerfectShuffle.h"
21#include "ARMRegisterInfo.h"
22#include "ARMSubtarget.h"
23#include "ARMTargetMachine.h"
24#include "ARMTargetObjectFile.h"
25#include "llvm/CallingConv.h"
26#include "llvm/Constants.h"
27#include "llvm/Function.h"
28#include "llvm/GlobalValue.h"
29#include "llvm/Instruction.h"
30#include "llvm/Intrinsics.h"
31#include "llvm/Type.h"
32#include "llvm/CodeGen/CallingConvLower.h"
33#include "llvm/CodeGen/MachineBasicBlock.h"
34#include "llvm/CodeGen/MachineFrameInfo.h"
35#include "llvm/CodeGen/MachineFunction.h"
36#include "llvm/CodeGen/MachineInstrBuilder.h"
37#include "llvm/CodeGen/MachineRegisterInfo.h"
38#include "llvm/CodeGen/PseudoSourceValue.h"
39#include "llvm/CodeGen/SelectionDAG.h"
40#include "llvm/Target/TargetOptions.h"
41#include "llvm/ADT/VectorExtras.h"
42#include "llvm/Support/CommandLine.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/raw_ostream.h"
46#include <sstream>
47using namespace llvm;
48
49static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
50                                   CCValAssign::LocInfo &LocInfo,
51                                   ISD::ArgFlagsTy &ArgFlags,
52                                   CCState &State);
53static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
54                                    CCValAssign::LocInfo &LocInfo,
55                                    ISD::ArgFlagsTy &ArgFlags,
56                                    CCState &State);
57static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
58                                      CCValAssign::LocInfo &LocInfo,
59                                      ISD::ArgFlagsTy &ArgFlags,
60                                      CCState &State);
61static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
62                                       CCValAssign::LocInfo &LocInfo,
63                                       ISD::ArgFlagsTy &ArgFlags,
64                                       CCState &State);
65
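/// addTypeForNEON - Set up the operation actions for a legal NEON vector
/// type: promote its loads, stores, and bitwise ops to the given types, make
/// shifts and shuffles Custom, and Expand operations NEON cannot do natively.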
66void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
67                                       EVT PromotedBitwiseVT) {
68  if (VT != PromotedLdStVT) {
69    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
70    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
71                       PromotedLdStVT.getSimpleVT());
72
73    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
74    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
75                       PromotedLdStVT.getSimpleVT());
76  }
77
78  EVT ElemTy = VT.getVectorElementType();
79  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
80    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
81  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
82    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
83  if (ElemTy != MVT::i32) {
84    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
85    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
86    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
87    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
88  }
89  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
90  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
91  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
92  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
93  if (VT.isInteger()) {
94    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
95    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
96    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
97  }
98
99  // Promote all bit-wise operations.
100  if (VT.isInteger() && VT != PromotedBitwiseVT) {
101    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
102    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
103                       PromotedBitwiseVT.getSimpleVT());
104    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
105    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
106                       PromotedBitwiseVT.getSimpleVT());
107    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
108    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
109                       PromotedBitwiseVT.getSimpleVT());
110  }
111
112  // Neon does not support vector divide/remainder operations.
113  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
114  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
115  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
116  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
117  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
118  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
119}
120
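/// addDRTypeForNEON - Register a 64-bit vector type with the NEON D-register
/// class (DPR) and configure its NEON operation actions.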
121void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
122  addRegisterClass(VT, ARM::DPRRegisterClass);
123  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
124}
125
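/// addQRTypeForNEON - Register a 128-bit vector type with the NEON Q-register
/// class (QPR) and configure its NEON operation actions.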
126void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
127  addRegisterClass(VT, ARM::QPRRegisterClass);
128  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
129}
130
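// createTLOF - Select the object file lowering: Mach-O for Darwin targets,
// ELF for everything else.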
131static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
132  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
133    return new TargetLoweringObjectFileMachO();
134  return new ARMElfTargetObjectFile();
135}
136
137ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
138    : TargetLowering(TM, createTLOF(TM)) {
139  Subtarget = &TM.getSubtarget<ARMSubtarget>();
140
141  if (Subtarget->isTargetDarwin()) {
142    // Uses VFP for Thumb libfuncs if available.
143    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
144      // Single-precision floating-point arithmetic.
145      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
146      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
147      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
148      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
149
150      // Double-precision floating-point arithmetic.
151      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
152      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
153      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
154      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
155
156      // Single-precision comparisons.
157      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
158      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
159      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
160      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
161      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
162      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
163      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
164      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
165
166      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
167      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
168      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
169      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
170      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
171      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
172      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
173      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
174
175      // Double-precision comparisons.
176      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
177      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
178      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
179      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
180      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
181      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
182      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
183      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");
184
185      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
186      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
187      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
188      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
189      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
190      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
191      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
192      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
193
194      // Floating-point to integer conversions.
195      // i64 conversions are done via library routines even when generating VFP
196      // instructions, so use the same ones.
197      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
198      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
199      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
200      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
201
202      // Conversions between floating types.
203      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
204      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");
205
206      // Integer to floating-point conversions.
207      // i64 conversions are done via library routines even when generating VFP
208      // instructions, so use the same ones.
209      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
210      // e.g., __floatunsidf vs. __floatunssidfvfp.
211      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
212      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
213      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
214      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
215    }
216  }
217
218  // These libcalls are not available in 32-bit mode.
219  setLibcallName(RTLIB::SHL_I128, 0);
220  setLibcallName(RTLIB::SRL_I128, 0);
221  setLibcallName(RTLIB::SRA_I128, 0);
222
223  // Libcalls should use the AAPCS base standard ABI, even if hard float
224  // is in effect, as per the ARM RTABI specification, section 4.1.2.
225  if (Subtarget->isAAPCS_ABI()) {
226    for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
227      setLibcallCallingConv(static_cast<RTLIB::Libcall>(i),
228                            CallingConv::ARM_AAPCS);
229    }
230  }
231
232  if (Subtarget->isThumb1Only())
233    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
234  else
235    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
236  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
237    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
238    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
239
240    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
241  }
242
243  if (Subtarget->hasNEON()) {
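    // Register the legal NEON vector types: 64-bit types live in D registers,
    // 128-bit types in Q registers.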
244    addDRTypeForNEON(MVT::v2f32);
245    addDRTypeForNEON(MVT::v8i8);
246    addDRTypeForNEON(MVT::v4i16);
247    addDRTypeForNEON(MVT::v2i32);
248    addDRTypeForNEON(MVT::v1i64);
249
250    addQRTypeForNEON(MVT::v4f32);
251    addQRTypeForNEON(MVT::v2f64);
252    addQRTypeForNEON(MVT::v16i8);
253    addQRTypeForNEON(MVT::v8i16);
254    addQRTypeForNEON(MVT::v4i32);
255    addQRTypeForNEON(MVT::v2i64);
256
257    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
258    // neither Neon nor VFP supports any arithmetic operations on it.
259    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
260    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
261    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
262    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
263    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
264    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
265    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
266    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
267    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
268    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
269    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
270    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
271    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
272    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
273    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
274    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
275    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
276    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
277    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
278    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
279    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
280    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
281    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
282    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
283
284    // Neon does not support some operations on v1i64 and v2i64 types.
285    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
286    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
287    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
288    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
289
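    // Register nodes for which PerformDAGCombine produces NEON-specific
    // operations (vector shifts, shift intrinsics, and extensions).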
290    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
291    setTargetDAGCombine(ISD::SHL);
292    setTargetDAGCombine(ISD::SRL);
293    setTargetDAGCombine(ISD::SRA);
294    setTargetDAGCombine(ISD::SIGN_EXTEND);
295    setTargetDAGCombine(ISD::ZERO_EXTEND);
296    setTargetDAGCombine(ISD::ANY_EXTEND);
297  }
298
299  computeRegisterProperties();
300
301  // ARM does not have f32 extending load.
302  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
303
304  // ARM does not have i1 sign extending load.
305  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
306
307  // ARM supports all 4 flavors of integer indexed load / store.
308  if (!Subtarget->isThumb1Only()) {
309    for (unsigned im = (unsigned)ISD::PRE_INC;
310         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
311      setIndexedLoadAction(im,  MVT::i1,  Legal);
312      setIndexedLoadAction(im,  MVT::i8,  Legal);
313      setIndexedLoadAction(im,  MVT::i16, Legal);
314      setIndexedLoadAction(im,  MVT::i32, Legal);
315      setIndexedStoreAction(im, MVT::i1,  Legal);
316      setIndexedStoreAction(im, MVT::i8,  Legal);
317      setIndexedStoreAction(im, MVT::i16, Legal);
318      setIndexedStoreAction(im, MVT::i32, Legal);
319    }
320  }
321
322  // i64 operation support.
323  if (Subtarget->isThumb1Only()) {
324    setOperationAction(ISD::MUL,     MVT::i64, Expand);
325    setOperationAction(ISD::MULHU,   MVT::i32, Expand);
326    setOperationAction(ISD::MULHS,   MVT::i32, Expand);
327    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
328    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
329  } else {
330    setOperationAction(ISD::MUL,     MVT::i64, Expand);
331    setOperationAction(ISD::MULHU,   MVT::i32, Expand);
332    if (!Subtarget->hasV6Ops())
333      setOperationAction(ISD::MULHS, MVT::i32, Expand);
334  }
335  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
336  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
337  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
338  setOperationAction(ISD::SRL,       MVT::i64, Custom);
339  setOperationAction(ISD::SRA,       MVT::i64, Custom);
340
341  // ARM does not have ROTL.
342  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
343  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
344  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
345  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
346    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
347
348  // Only ARMv6 and later have BSWAP (REV).
349  if (!Subtarget->hasV6Ops())
350    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
351
352  // These are expanded into libcalls.
353  setOperationAction(ISD::SDIV,  MVT::i32, Expand);
354  setOperationAction(ISD::UDIV,  MVT::i32, Expand);
355  setOperationAction(ISD::SREM,  MVT::i32, Expand);
356  setOperationAction(ISD::UREM,  MVT::i32, Expand);
357  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
358  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
359
360  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
361  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
362  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
363  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
364  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
365
366  // Use the default implementation.
367  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
368  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
369  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
370  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
371  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
372  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
373  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
374  // FIXME: Shouldn't need this, since no register is used, but the legalizer
375  // doesn't yet know how to avoid it for SjLj.
376  setExceptionSelectorRegister(ARM::R0);
377  if (Subtarget->isThumb())
378    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
379  else
380    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
381  setOperationAction(ISD::MEMBARRIER,         MVT::Other, Custom);
382
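  // Pre-ARMv6 (non-Thumb2) targets have no SXTB/SXTH, so i8/i16
  // sign_extend_inreg is expanded to a shift-left/shift-right pair.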
383  if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
384    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
385    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
386  }
387  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
388
389  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
390    // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR iff the target supports VFP2.
391    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
392
393  // We want to custom lower some of our intrinsics.
394  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
395
396  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
397  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
398  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
399  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
400  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
401  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
402  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
403  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
404  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
405
406  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
407  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
408  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
409  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
410  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
411
412  // We don't support sin/cos/fmod/copysign/pow
413  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
414  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
415  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
416  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
417  setOperationAction(ISD::FREM,      MVT::f64, Expand);
418  setOperationAction(ISD::FREM,      MVT::f32, Expand);
419  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
420    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
421    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
422  }
423  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
424  setOperationAction(ISD::FPOW,      MVT::f32, Expand);
425
426  // int <-> fp are custom expanded into bit_convert + ARMISD ops.
427  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
428    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
429    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
430    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
431    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
432  }
433
434  // We have target-specific dag combine patterns for the following nodes:
435  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
436  setTargetDAGCombine(ISD::ADD);
437  setTargetDAGCombine(ISD::SUB);
438
439  setStackPointerRegisterToSaveRestore(ARM::SP);
440  setSchedulingPreference(SchedulingForRegPressure);
441
442  // FIXME: If-converter should use instruction latency to determine
443  // profitability rather than relying on fixed limits.
444  if (Subtarget->getCPUString() == "generic") {
445    // Generic (and overly aggressive) if-conversion limits.
446    setIfCvtBlockSizeLimit(10);
447    setIfCvtDupBlockSizeLimit(2);
448  } else if (Subtarget->hasV6Ops()) {
449    setIfCvtBlockSizeLimit(2);
450    setIfCvtDupBlockSizeLimit(1);
451  } else {
452    setIfCvtBlockSizeLimit(3);
453    setIfCvtDupBlockSizeLimit(2);
454  }
455
456  maxStoresPerMemcpy = 1;   // temporary - rewrite interface to use type
457  // Do not enable CodePlacementOpt for now: it currently runs after the
458  // ARMConstantIslandPass and messes up branch relaxation and placement
459  // of constant islands.
460  // benefitFromCodePlacementOpt = true;
461}
462
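/// getTargetNodeName - Return a readable name for the given target-specific
/// (ARMISD) node opcode, used when printing and debugging selection DAGs.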
463const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
464  switch (Opcode) {
465  default: return 0;
466  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
467  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
468  case ARMISD::CALL:          return "ARMISD::CALL";
469  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
470  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
471  case ARMISD::tCALL:         return "ARMISD::tCALL";
472  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
473  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
474  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
475  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
476  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
477  case ARMISD::CMP:           return "ARMISD::CMP";
478  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
479  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
480  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
481  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
482  case ARMISD::CMOV:          return "ARMISD::CMOV";
483  case ARMISD::CNEG:          return "ARMISD::CNEG";
484
485  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
486  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
487  case ARMISD::SITOF:         return "ARMISD::SITOF";
488  case ARMISD::UITOF:         return "ARMISD::UITOF";
489
490  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
491  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
492  case ARMISD::RRX:           return "ARMISD::RRX";
493
494  case ARMISD::VMOVRRD:         return "ARMISD::VMOVRRD";
495  case ARMISD::VMOVDRR:         return "ARMISD::VMOVDRR";
496
497  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
498  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
499
500  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
501
502  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
503
504  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
505  case ARMISD::SYNCBARRIER:   return "ARMISD::SYNCBARRIER";
506
507  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
508  case ARMISD::VCGE:          return "ARMISD::VCGE";
509  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
510  case ARMISD::VCGT:          return "ARMISD::VCGT";
511  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
512  case ARMISD::VTST:          return "ARMISD::VTST";
513
514  case ARMISD::VSHL:          return "ARMISD::VSHL";
515  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
516  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
517  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
518  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
519  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
520  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
521  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
522  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
523  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
524  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
525  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
526  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
527  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
528  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
529  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
530  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
531  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
532  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
533  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
534  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
535  case ARMISD::VDUP:          return "ARMISD::VDUP";
536  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
537  case ARMISD::VEXT:          return "ARMISD::VEXT";
538  case ARMISD::VREV64:        return "ARMISD::VREV64";
539  case ARMISD::VREV32:        return "ARMISD::VREV32";
540  case ARMISD::VREV16:        return "ARMISD::VREV16";
541  case ARMISD::VZIP:          return "ARMISD::VZIP";
542  case ARMISD::VUZP:          return "ARMISD::VUZP";
543  case ARMISD::VTRN:          return "ARMISD::VTRN";
544  }
545}
546
547/// getFunctionAlignment - Return the Log2 alignment of this function.
548unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
549  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
550}
551
552//===----------------------------------------------------------------------===//
553// Lowering Code
554//===----------------------------------------------------------------------===//
555
556/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
557static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
558  switch (CC) {
559  default: llvm_unreachable("Unknown condition code!");
560  case ISD::SETNE:  return ARMCC::NE;
561  case ISD::SETEQ:  return ARMCC::EQ;
562  case ISD::SETGT:  return ARMCC::GT;
563  case ISD::SETGE:  return ARMCC::GE;
564  case ISD::SETLT:  return ARMCC::LT;
565  case ISD::SETLE:  return ARMCC::LE;
566  case ISD::SETUGT: return ARMCC::HI;
567  case ISD::SETUGE: return ARMCC::HS;
568  case ISD::SETULT: return ARMCC::LO;
569  case ISD::SETULE: return ARMCC::LS;
570  }
571}
572
573/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
574static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
575                        ARMCC::CondCodes &CondCode2) {
576  CondCode2 = ARMCC::AL;
577  switch (CC) {
578  default: llvm_unreachable("Unknown FP condition!");
579  case ISD::SETEQ:
580  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
581  case ISD::SETGT:
582  case ISD::SETOGT: CondCode = ARMCC::GT; break;
583  case ISD::SETGE:
584  case ISD::SETOGE: CondCode = ARMCC::GE; break;
585  case ISD::SETOLT: CondCode = ARMCC::MI; break;
586  case ISD::SETOLE: CondCode = ARMCC::LS; break;
587  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
588  case ISD::SETO:   CondCode = ARMCC::VC; break;
589  case ISD::SETUO:  CondCode = ARMCC::VS; break;
590  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
591  case ISD::SETUGT: CondCode = ARMCC::HI; break;
592  case ISD::SETUGE: CondCode = ARMCC::PL; break;
593  case ISD::SETLT:
594  case ISD::SETULT: CondCode = ARMCC::LT; break;
595  case ISD::SETLE:
596  case ISD::SETULE: CondCode = ARMCC::LE; break;
597  case ISD::SETNE:
598  case ISD::SETUNE: CondCode = ARMCC::NE; break;
599  }
600}
601
602//===----------------------------------------------------------------------===//
603//                      Calling Convention Implementation
604//===----------------------------------------------------------------------===//
605
606#include "ARMGenCallingConv.inc"
607
608// APCS f64 is in register pairs, possibly split to stack
609static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
610                          CCValAssign::LocInfo &LocInfo,
611                          CCState &State, bool CanFail) {
612  static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
613
614  // Try to get the first register.
615  if (unsigned Reg = State.AllocateReg(RegList, 4))
616    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
617  else {
618    // For the 2nd half of a v2f64, do not just fail; use the stack instead.
619    if (CanFail)
620      return false;
621
622    // Put the whole thing on the stack.
623    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
624                                           State.AllocateStack(8, 4),
625                                           LocVT, LocInfo));
626    return true;
627  }
628
629  // Try to get the second register.
630  if (unsigned Reg = State.AllocateReg(RegList, 4))
631    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
632  else
633    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
634                                           State.AllocateStack(4, 4),
635                                           LocVT, LocInfo));
636  return true;
637}
638
639static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
640                                   CCValAssign::LocInfo &LocInfo,
641                                   ISD::ArgFlagsTy &ArgFlags,
642                                   CCState &State) {
643  if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
644    return false;
645  if (LocVT == MVT::v2f64 &&
646      !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
647    return false;
648  return true;  // we handled it
649}
650
651// AAPCS f64 is in aligned register pairs
652static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
653                           CCValAssign::LocInfo &LocInfo,
654                           CCState &State, bool CanFail) {
655  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
656  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
657
658  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
659  if (Reg == 0) {
660    // For the 2nd half of a v2f64, do not just fail; use the stack instead.
661    if (CanFail)
662      return false;
663
664    // Put the whole thing on the stack.
665    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
666                                           State.AllocateStack(8, 8),
667                                           LocVT, LocInfo));
668    return true;
669  }
670
671  unsigned i;
672  for (i = 0; i < 2; ++i)
673    if (HiRegList[i] == Reg)
674      break;
675
676  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
677  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
678                                         LocVT, LocInfo));
679  return true;
680}
681
682static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
683                                    CCValAssign::LocInfo &LocInfo,
684                                    ISD::ArgFlagsTy &ArgFlags,
685                                    CCState &State) {
686  if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
687    return false;
688  if (LocVT == MVT::v2f64 &&
689      !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
690    return false;
691  return true;  // we handled it
692}
693
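// Assign an f64 return value (or half of a v2f64) to an aligned R0/R1 or
// R2/R3 register pair; returns false if no register pair is available.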
694static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
695                         CCValAssign::LocInfo &LocInfo, CCState &State) {
696  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
697  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
698
699  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
700  if (Reg == 0)
701    return false; // we didn't handle it
702
703  unsigned i;
704  for (i = 0; i < 2; ++i)
705    if (HiRegList[i] == Reg)
706      break;
707
708  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
709  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
710                                         LocVT, LocInfo));
711  return true;
712}
713
714static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
715                                      CCValAssign::LocInfo &LocInfo,
716                                      ISD::ArgFlagsTy &ArgFlags,
717                                      CCState &State) {
718  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
719    return false;
720  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
721    return false;
722  return true;  // we handled it
723}
724
725static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
726                                       CCValAssign::LocInfo &LocInfo,
727                                       ISD::ArgFlagsTy &ArgFlags,
728                                       CCState &State) {
729  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
730                                   State);
731}
732
733/// CCAssignFnForNode - Selects the correct CCAssignFn for the
734/// given CallingConvention value.
735CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
736                                                 bool Return,
737                                                 bool isVarArg) const {
738  switch (CC) {
739  default:
740    llvm_unreachable("Unsupported calling convention");
741  case CallingConv::C:
742  case CallingConv::Fast:
743    // Use target triple & subtarget features to do actual dispatch.
744    if (Subtarget->isAAPCS_ABI()) {
745      if (Subtarget->hasVFP2() &&
746          FloatABIType == FloatABI::Hard && !isVarArg)
747        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
748      else
749        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
750    } else
751        return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
752  case CallingConv::ARM_AAPCS_VFP:
753    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
754  case CallingConv::ARM_AAPCS:
755    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
756  case CallingConv::ARM_APCS:
757    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
758  }
759}
760
761/// LowerCallResult - Lower the result values of a call into the
762/// appropriate copies out of the physical registers.
763SDValue
764ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
765                                   CallingConv::ID CallConv, bool isVarArg,
766                                   const SmallVectorImpl<ISD::InputArg> &Ins,
767                                   DebugLoc dl, SelectionDAG &DAG,
768                                   SmallVectorImpl<SDValue> &InVals) {
769
770  // Assign locations to each value returned by this call.
771  SmallVector<CCValAssign, 16> RVLocs;
772  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
773                 RVLocs, *DAG.getContext());
774  CCInfo.AnalyzeCallResult(Ins,
775                           CCAssignFnForNode(CallConv, /* Return*/ true,
776                                             isVarArg));
777
778  // Copy all of the result registers out of their specified physreg.
779  for (unsigned i = 0; i != RVLocs.size(); ++i) {
780    CCValAssign VA = RVLocs[i];
781
782    SDValue Val;
783    if (VA.needsCustom()) {
784      // Handle f64 or half of a v2f64.
785      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
786                                      InFlag);
787      Chain = Lo.getValue(1);
788      InFlag = Lo.getValue(2);
789      VA = RVLocs[++i]; // skip ahead to next loc
790      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
791                                      InFlag);
792      Chain = Hi.getValue(1);
793      InFlag = Hi.getValue(2);
794      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
795
796      if (VA.getLocVT() == MVT::v2f64) {
797        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
798        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
799                          DAG.getConstant(0, MVT::i32));
800
801        VA = RVLocs[++i]; // skip ahead to next loc
802        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
803        Chain = Lo.getValue(1);
804        InFlag = Lo.getValue(2);
805        VA = RVLocs[++i]; // skip ahead to next loc
806        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
807        Chain = Hi.getValue(1);
808        InFlag = Hi.getValue(2);
809        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
810        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
811                          DAG.getConstant(1, MVT::i32));
812      }
813    } else {
814      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
815                               InFlag);
816      Chain = Val.getValue(1);
817      InFlag = Val.getValue(2);
818    }
819
820    switch (VA.getLocInfo()) {
821    default: llvm_unreachable("Unknown loc info!");
822    case CCValAssign::Full: break;
823    case CCValAssign::BCvt:
824      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
825      break;
826    }
827
828    InVals.push_back(Val);
829  }
830
831  return Chain;
832}
833
834/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
835/// by "Src" to address "Dst" of size "Size".  Alignment information is
836/// specified by the specific parameter attribute.  The copy will be passed as
837/// a byval function parameter.
838/// Sometimes what we are copying is the end of a larger object, the part that
839/// does not fit in registers.
840static SDValue
841CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
842                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
843                          DebugLoc dl) {
844  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
845  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
846                       /*AlwaysInline=*/false, NULL, 0, NULL, 0);
847}
848
849/// LowerMemOpCallTo - Store the argument to the stack.
850SDValue
851ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
852                                    SDValue StackPtr, SDValue Arg,
853                                    DebugLoc dl, SelectionDAG &DAG,
854                                    const CCValAssign &VA,
855                                    ISD::ArgFlagsTy Flags) {
856  unsigned LocMemOffset = VA.getLocMemOffset();
857  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
858  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
859  if (Flags.isByVal()) {
860    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
861  }
862  return DAG.getStore(Chain, dl, Arg, PtrOff,
863                      PseudoSourceValue::getStack(), LocMemOffset);
864}
865
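/// PassF64ArgInRegs - Split an f64 argument (or half of a v2f64) into a pair
/// of i32 values with VMOVRRD and place them in the assigned registers,
/// storing the second word to the stack if it was given a memory location.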
866void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
867                                         SDValue Chain, SDValue &Arg,
868                                         RegsToPassVector &RegsToPass,
869                                         CCValAssign &VA, CCValAssign &NextVA,
870                                         SDValue &StackPtr,
871                                         SmallVector<SDValue, 8> &MemOpChains,
872                                         ISD::ArgFlagsTy Flags) {
873
874  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
875                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
876  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
877
878  if (NextVA.isRegLoc())
879    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
880  else {
881    assert(NextVA.isMemLoc());
882    if (StackPtr.getNode() == 0)
883      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
884
885    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
886                                           dl, DAG, NextVA,
887                                           Flags));
888  }
889}
890
891/// LowerCall - Lower a call into a callseq_start <-
892/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
893/// nodes.
894SDValue
895ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
896                             CallingConv::ID CallConv, bool isVarArg,
897                             bool isTailCall,
898                             const SmallVectorImpl<ISD::OutputArg> &Outs,
899                             const SmallVectorImpl<ISD::InputArg> &Ins,
900                             DebugLoc dl, SelectionDAG &DAG,
901                             SmallVectorImpl<SDValue> &InVals) {
902
903  // Analyze operands of the call, assigning locations to each operand.
904  SmallVector<CCValAssign, 16> ArgLocs;
905  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
906                 *DAG.getContext());
907  CCInfo.AnalyzeCallOperands(Outs,
908                             CCAssignFnForNode(CallConv, /* Return*/ false,
909                                               isVarArg));
910
911  // Get a count of how many bytes are to be pushed on the stack.
912  unsigned NumBytes = CCInfo.getNextStackOffset();
913
914  // Adjust the stack pointer for the new arguments...
915  // These operations are automatically eliminated by the prolog/epilog pass
916  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
917
918  SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);
919
920  RegsToPassVector RegsToPass;
921  SmallVector<SDValue, 8> MemOpChains;
922
923  // Walk the register/memloc assignments, inserting copies/loads.  In the case
924  // of tail call optimization, arguments are handled later.
925  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
926       i != e;
927       ++i, ++realArgIdx) {
928    CCValAssign &VA = ArgLocs[i];
929    SDValue Arg = Outs[realArgIdx].Val;
930    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
931
932    // Promote the value if needed.
933    switch (VA.getLocInfo()) {
934    default: llvm_unreachable("Unknown loc info!");
935    case CCValAssign::Full: break;
936    case CCValAssign::SExt:
937      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
938      break;
939    case CCValAssign::ZExt:
940      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
941      break;
942    case CCValAssign::AExt:
943      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
944      break;
945    case CCValAssign::BCvt:
946      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
947      break;
948    }
949
950    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
951    if (VA.needsCustom()) {
952      if (VA.getLocVT() == MVT::v2f64) {
953        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
954                                  DAG.getConstant(0, MVT::i32));
955        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
956                                  DAG.getConstant(1, MVT::i32));
957
958        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
959                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
960
961        VA = ArgLocs[++i]; // skip ahead to next loc
962        if (VA.isRegLoc()) {
963          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
964                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
965        } else {
966          assert(VA.isMemLoc());
967          if (StackPtr.getNode() == 0)
968            StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
969
970          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
971                                                 dl, DAG, VA, Flags));
972        }
973      } else {
974        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
975                         StackPtr, MemOpChains, Flags);
976      }
977    } else if (VA.isRegLoc()) {
978      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
979    } else {
980      assert(VA.isMemLoc());
981      if (StackPtr.getNode() == 0)
982        StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
983
984      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
985                                             dl, DAG, VA, Flags));
986    }
987  }
988
989  if (!MemOpChains.empty())
990    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
991                        &MemOpChains[0], MemOpChains.size());
992
993  // Build a sequence of copy-to-reg nodes chained together with token chain
994  // and flag operands which copy the outgoing args into the appropriate regs.
995  SDValue InFlag;
996  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
997    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
998                             RegsToPass[i].second, InFlag);
999    InFlag = Chain.getValue(1);
1000  }
1001
1002  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1003  // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
1004  // node so that legalize doesn't hack it.
1005  bool isDirect = false;
1006  bool isARMFunc = false;
1007  bool isLocalARMFunc = false;
1008  MachineFunction &MF = DAG.getMachineFunction();
1009  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1010  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1011    GlobalValue *GV = G->getGlobal();
1012    isDirect = true;
1013    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
1014    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
1015                   getTargetMachine().getRelocationModel() != Reloc::Static;
1016    isARMFunc = !Subtarget->isThumb() || isStub;
1017    // ARM call to a local ARM function is predicable.
1018    isLocalARMFunc = !Subtarget->isThumb() && !isExt;
1019    // tBX takes a register source operand.
1020    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
1021      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1022      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
1023                                                           ARMPCLabelIndex,
1024                                                           ARMCP::CPValue, 4);
1025      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1026      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1027      Callee = DAG.getLoad(getPointerTy(), dl,
1028                           DAG.getEntryNode(), CPAddr,
1029                           PseudoSourceValue::getConstantPool(), 0);
1030      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1031      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
1032                           getPointerTy(), Callee, PICLabel);
1033    } else
1034      Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
1035  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1036    isDirect = true;
1037    bool isStub = Subtarget->isTargetDarwin() &&
1038                  getTargetMachine().getRelocationModel() != Reloc::Static;
1039    isARMFunc = !Subtarget->isThumb() || isStub;
1040    // tBX takes a register source operand.
1041    const char *Sym = S->getSymbol();
1042    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
1043      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1044      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
1045                                                       Sym, ARMPCLabelIndex, 4);
1046      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1047      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1048      Callee = DAG.getLoad(getPointerTy(), dl,
1049                           DAG.getEntryNode(), CPAddr,
1050                           PseudoSourceValue::getConstantPool(), 0);
1051      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1052      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
1053                           getPointerTy(), Callee, PICLabel);
1054    } else
1055      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
1056  }
1057
1058  // FIXME: handle tail calls differently.
1059  unsigned CallOpc;
1060  if (Subtarget->isThumb()) {
1061    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
1062      CallOpc = ARMISD::CALL_NOLINK;
1063    else
1064      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
1065  } else {
1066    CallOpc = (isDirect || Subtarget->hasV5TOps())
1067      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
1068      : ARMISD::CALL_NOLINK;
1069  }
1070  if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
1071    // implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK
1072    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
1073    InFlag = Chain.getValue(1);
1074  }
1075
1076  std::vector<SDValue> Ops;
1077  Ops.push_back(Chain);
1078  Ops.push_back(Callee);
1079
1080  // Add argument registers to the end of the list so that they are known live
1081  // into the call.
1082  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1083    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1084                                  RegsToPass[i].second.getValueType()));
1085
1086  if (InFlag.getNode())
1087    Ops.push_back(InFlag);
1088  // Returns a chain and a flag for retval copy to use.
1089  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
1090                      &Ops[0], Ops.size());
1091  InFlag = Chain.getValue(1);
1092
1093  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1094                             DAG.getIntPtrConstant(0, true), InFlag);
1095  if (!Ins.empty())
1096    InFlag = Chain.getValue(1);
1097
1098  // Handle result values, copying them out of physregs into vregs that we
1099  // return.
1100  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
1101                         dl, DAG, InVals);
1102}
1103
1104SDValue
1105ARMTargetLowering::LowerReturn(SDValue Chain,
1106                               CallingConv::ID CallConv, bool isVarArg,
1107                               const SmallVectorImpl<ISD::OutputArg> &Outs,
1108                               DebugLoc dl, SelectionDAG &DAG) {
1109
1110  // CCValAssign - represent the assignment of the return value to a location.
1111  SmallVector<CCValAssign, 16> RVLocs;
1112
1113  // CCState - Info about the registers and stack slots.
1114  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
1115                 *DAG.getContext());
1116
1117  // Analyze outgoing return values.
1118  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
1119                                               isVarArg));
1120
1121  // If this is the first return lowered for this function, add
1122  // the regs to the liveout set for the function.
1123  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1124    for (unsigned i = 0; i != RVLocs.size(); ++i)
1125      if (RVLocs[i].isRegLoc())
1126        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1127  }
1128
1129  SDValue Flag;
1130
1131  // Copy the result values into the output registers.
1132  for (unsigned i = 0, realRVLocIdx = 0;
1133       i != RVLocs.size();
1134       ++i, ++realRVLocIdx) {
1135    CCValAssign &VA = RVLocs[i];
1136    assert(VA.isRegLoc() && "Can only return in registers!");
1137
1138    SDValue Arg = Outs[realRVLocIdx].Val;
1139
1140    switch (VA.getLocInfo()) {
1141    default: llvm_unreachable("Unknown loc info!");
1142    case CCValAssign::Full: break;
1143    case CCValAssign::BCvt:
1144      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
1145      break;
1146    }
1147
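    // f64 (and each half of a v2f64) return values are split with VMOVRRD and
    // returned in a pair of i32 GPRs.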
1148    if (VA.needsCustom()) {
1149      if (VA.getLocVT() == MVT::v2f64) {
1150        // Extract the first half and return it in two registers.
1151        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1152                                   DAG.getConstant(0, MVT::i32));
1153        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
1154                                       DAG.getVTList(MVT::i32, MVT::i32), Half);
1155
1156        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
1157        Flag = Chain.getValue(1);
1158        VA = RVLocs[++i]; // skip ahead to next loc
1159        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1160                                 HalfGPRs.getValue(1), Flag);
1161        Flag = Chain.getValue(1);
1162        VA = RVLocs[++i]; // skip ahead to next loc
1163
1164        // Extract the 2nd half and fall through to handle it as an f64 value.
1165        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1166                          DAG.getConstant(1, MVT::i32));
1167      }
1168      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
1169      // available.
1170      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1171                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
1172      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
1173      Flag = Chain.getValue(1);
1174      VA = RVLocs[++i]; // skip ahead to next loc
1175      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
1176                               Flag);
1177    } else
1178      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1179
1180    // Guarantee that all emitted copies are
1181    // stuck together by the flag operand, so they cannot be scheduled apart.
1182    Flag = Chain.getValue(1);
1183  }
1184
1185  SDValue result;
1186  if (Flag.getNode())
1187    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1188  else // Return Void
1189    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
1190
1191  return result;
1192}
1193
1194// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
1195// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
1196// one of the above-mentioned nodes. It has to be wrapped because otherwise
1197// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
1198// be used to form addressing modes. These wrapped nodes will be selected
1199// into MOVi.
1200static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
1201  EVT PtrVT = Op.getValueType();
1202  // FIXME: there is no actual debug info here.
1203  DebugLoc dl = Op.getDebugLoc();
1204  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1205  SDValue Res;
1206  if (CP->isMachineConstantPoolEntry())
1207    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
1208                                    CP->getAlignment());
1209  else
1210    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
1211                                    CP->getAlignment());
1212  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
1213}
1214
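/// LowerBlockAddress - Lower a BlockAddress node to a load from the constant
/// pool, adding a PIC label add (ARMISD::PIC_ADD) when not using static
/// relocations.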
1215SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
1216  MachineFunction &MF = DAG.getMachineFunction();
1217  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1218  unsigned ARMPCLabelIndex = 0;
1219  DebugLoc DL = Op.getDebugLoc();
1220  EVT PtrVT = getPointerTy();
1221  BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1222  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1223  SDValue CPAddr;
1224  if (RelocM == Reloc::Static) {
1225    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
1226  } else {
1227    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
1228    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1229    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
1230                                                         ARMCP::CPBlockAddress,
1231                                                         PCAdj);
1232    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1233  }
1234  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
1235  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
1236                               PseudoSourceValue::getConstantPool(), 0);
1237  if (RelocM == Reloc::Static)
1238    return Result;
1239  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1240  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
1241}
1242
1243// Lower ISD::GlobalTLSAddress using the "general dynamic" model
1244SDValue
1245ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1246                                                 SelectionDAG &DAG) {
1247  DebugLoc dl = GA->getDebugLoc();
1248  EVT PtrVT = getPointerTy();
1249  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1250  MachineFunction &MF = DAG.getMachineFunction();
1251  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1252  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1253  ARMConstantPoolValue *CPV =
1254    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
1255                             ARMCP::CPValue, PCAdj, "tlsgd", true);
1256  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1257  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
1258  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
1259                         PseudoSourceValue::getConstantPool(), 0);
1260  SDValue Chain = Argument.getValue(1);
1261
1262  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1263  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
1264
1265  // call __tls_get_addr.
1266  ArgListTy Args;
1267  ArgListEntry Entry;
1268  Entry.Node = Argument;
1269  Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
1270  Args.push_back(Entry);
1271  // FIXME: is there useful debug info available here?
1272  std::pair<SDValue, SDValue> CallResult =
1273    LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
1274                false, false, false, false,
1275                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
1276                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl,
1277                DAG.GetOrdering(Chain.getNode()));
1278  return CallResult.first;
1279}
1280
1281// Lower ISD::GlobalTLSAddress using the "initial exec" or
1282// "local exec" model.
1283SDValue
1284ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
1285                                        SelectionDAG &DAG) {
1286  GlobalValue *GV = GA->getGlobal();
1287  DebugLoc dl = GA->getDebugLoc();
1288  SDValue Offset;
1289  SDValue Chain = DAG.getEntryNode();
1290  EVT PtrVT = getPointerTy();
1291  // Get the Thread Pointer
1292  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
1293
1294  if (GV->isDeclaration()) {
1295    MachineFunction &MF = DAG.getMachineFunction();
1296    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1297    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1298    // Initial exec model.
1299    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1300    ARMConstantPoolValue *CPV =
1301      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
1302                               ARMCP::CPValue, PCAdj, "gottpoff", true);
1303    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1304    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
1305    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1306                         PseudoSourceValue::getConstantPool(), 0);
1307    Chain = Offset.getValue(1);
1308
1309    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1310    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
1311
1312    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1313                         PseudoSourceValue::getConstantPool(), 0);
1314  } else {
1315    // local exec model
1316    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff");
1317    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1318    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
1319    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
1320                         PseudoSourceValue::getConstantPool(), 0);
1321  }
1322
1323  // The address of the thread local variable is the add of the thread
1324  // pointer with the offset of the variable.
1325  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
1326}
1327
1328SDValue
1329ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
1330  // TODO: implement the "local dynamic" model
1331  assert(Subtarget->isTargetELF() &&
1332         "TLS not implemented for non-ELF targets");
1333  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1334  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
1335  // otherwise use the "Local Exec" TLS Model
1336  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
1337    return LowerToTLSGeneralDynamicModel(GA, DAG);
1338  else
1339    return LowerToTLSExecModels(GA, DAG);
1340}
1341
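/// LowerGlobalAddressELF - Lower a global address for ELF targets.  PIC code
/// loads the address via a GOTOFF (local/hidden globals) or GOT constant-pool
/// entry and adds the GOT base; otherwise the address is either formed with a
/// movw/movt pair (when available) or loaded from the constant pool.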
1342SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
1343                                                 SelectionDAG &DAG) {
1344  EVT PtrVT = getPointerTy();
1345  DebugLoc dl = Op.getDebugLoc();
1346  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1347  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1348  if (RelocM == Reloc::PIC_) {
1349    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
1350    ARMConstantPoolValue *CPV =
1351      new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT");
1352    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1353    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1354    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
1355                                 CPAddr,
1356                                 PseudoSourceValue::getConstantPool(), 0);
1357    SDValue Chain = Result.getValue(1);
1358    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1359    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
1360    if (!UseGOTOFF)
1361      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
1362                           PseudoSourceValue::getGOT(), 0);
1363    return Result;
1364  } else {
1365    // If we have T2 ops, we can materialize the address directly via movt/movw
1366    // pair. This is always cheaper.
1367    if (Subtarget->useMovt()) {
1368      return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
1369                         DAG.getTargetGlobalAddress(GV, PtrVT));
1370    } else {
1371      SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
1372      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1373      return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1374                         PseudoSourceValue::getConstantPool(), 0);
1375    }
1376  }
1377}
1378
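/// LowerGlobalAddressDarwin - Lower a global address for Darwin targets via a
/// constant-pool load, adding a PIC adjustment when needed and an extra load
/// for globals that are accessed through an indirect symbol.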
1379SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
1380                                                    SelectionDAG &DAG) {
1381  MachineFunction &MF = DAG.getMachineFunction();
1382  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1383  unsigned ARMPCLabelIndex = 0;
1384  EVT PtrVT = getPointerTy();
1385  DebugLoc dl = Op.getDebugLoc();
1386  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1387  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1388  SDValue CPAddr;
1389  if (RelocM == Reloc::Static)
1390    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
1391  else {
1392    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1393    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
1394    ARMConstantPoolValue *CPV =
1395      new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj);
1396    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1397  }
1398  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1399
1400  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1401                               PseudoSourceValue::getConstantPool(), 0);
1402  SDValue Chain = Result.getValue(1);
1403
1404  if (RelocM == Reloc::PIC_) {
1405    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1406    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1407  }
1408
1409  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
1410    Result = DAG.getLoad(PtrVT, dl, Chain, Result,
1411                         PseudoSourceValue::getGOT(), 0);
1412
1413  return Result;
1414}
1415
1416SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
1417                                                    SelectionDAG &DAG){
1418  assert(Subtarget->isTargetELF() &&
1419         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
1420  MachineFunction &MF = DAG.getMachineFunction();
1421  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1422  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1423  EVT PtrVT = getPointerTy();
1424  DebugLoc dl = Op.getDebugLoc();
1425  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
1426  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
1427                                                       "_GLOBAL_OFFSET_TABLE_",
1428                                                       ARMPCLabelIndex, PCAdj);
1429  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1430  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1431  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1432                               PseudoSourceValue::getConstantPool(), 0);
1433  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1434  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1435}
1436
1437SDValue
1438ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
1439  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1440  DebugLoc dl = Op.getDebugLoc();
1441  switch (IntNo) {
1442  default: return SDValue();    // Don't custom lower most intrinsics.
1443  case Intrinsic::arm_thread_pointer: {
1444    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1445    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
1446  }
1447  case Intrinsic::eh_sjlj_lsda: {
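    // Materialize the address of the current function's LSDA (language
    // specific data area) via a constant-pool entry, with a PIC adjustment
    // when needed.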
1448    MachineFunction &MF = DAG.getMachineFunction();
1449    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1450    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
1451    EVT PtrVT = getPointerTy();
1452    DebugLoc dl = Op.getDebugLoc();
1453    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1454    SDValue CPAddr;
1455    unsigned PCAdj = (RelocM != Reloc::PIC_)
1456      ? 0 : (Subtarget->isThumb() ? 4 : 8);
1457    ARMConstantPoolValue *CPV =
1458      new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex,
1459                               ARMCP::CPLSDA, PCAdj);
1460    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1461    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1462    SDValue Result =
1463      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
1464                  PseudoSourceValue::getConstantPool(), 0);
1465    SDValue Chain = Result.getValue(1);
1466
1467    if (RelocM == Reloc::PIC_) {
1468      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1469      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
1470    }
1471    return Result;
1472  }
1473  case Intrinsic::eh_sjlj_setjmp:
1474    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1));
1475  }
1476}
1477
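/// LowerMEMBARRIER - Lower ISD::MEMBARRIER.  Operand 5 selects between a
/// device (sync) barrier and a plain memory barrier; pre-ARMv7 subtargets get
/// an extra zero operand for the selected barrier instruction.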
1478static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
1479                               const ARMSubtarget *Subtarget) {
1480  DebugLoc dl = Op.getDebugLoc();
1481  SDValue Op5 = Op.getOperand(5);
1482  SDValue Res;
1483  unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
1484  if (isDeviceBarrier) {
1485    if (Subtarget->hasV7Ops())
1486      Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0));
1487    else
1488      Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0),
1489                        DAG.getConstant(0, MVT::i32));
1490  } else {
1491    if (Subtarget->hasV7Ops())
1492      Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
1493    else
1494      Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
1495                        DAG.getConstant(0, MVT::i32));
1496  }
1497  return Res;
1498}
1499
1500static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
1501                            unsigned VarArgsFrameIndex) {
1502  // vastart just stores the address of the VarArgsFrameIndex slot into the
1503  // memory location argument.
1504  DebugLoc dl = Op.getDebugLoc();
1505  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1506  SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
1507  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1508  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
1509}
1510
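/// LowerDYNAMIC_STACKALLOC - Lower a dynamic stack allocation to an
/// ARMISD::DYN_ALLOC node bracketed by CALLSEQ_START / CALLSEQ_END so that it
/// cannot be interleaved with other stack adjustments.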
1511SDValue
1512ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
1513  SDNode *Node = Op.getNode();
1514  DebugLoc dl = Node->getDebugLoc();
1515  EVT VT = Node->getValueType(0);
1516  SDValue Chain = Op.getOperand(0);
1517  SDValue Size  = Op.getOperand(1);
1518  SDValue Align = Op.getOperand(2);
1519
1520  // Chain the dynamic stack allocation so that it doesn't modify the stack
1521  // pointer when other instructions are using the stack.
1522  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
1523
1524  unsigned AlignVal = cast<ConstantSDNode>(Align)->getZExtValue();
1525  unsigned StackAlign = getTargetMachine().getFrameInfo()->getStackAlignment();
1526  if (AlignVal > StackAlign)
1527    // Do this now since the selection pass cannot introduce new
1528    // target-independent nodes.
1529    Align = DAG.getConstant(-(uint64_t)AlignVal, VT);
1530
1531  // In Thumb1 mode there isn't a "sub r, sp, r" instruction, so we end up
1532  // using an "add r, sp, r" instead. Negate the size now so we don't have to
1533  // resort to an even uglier hack later.
1534  MachineFunction &MF = DAG.getMachineFunction();
1535  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1536  if (AFI->isThumb1OnlyFunction()) {
1537    bool Negate = true;
1538    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
1539    if (C) {
1540      uint32_t Val = C->getZExtValue();
1541      if (Val <= 508 && ((Val & 3) == 0))
1542        Negate = false;
1543    }
1544    if (Negate)
1545      Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
1546  }
1547
1548  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
1549  SDValue Ops1[] = { Chain, Size, Align };
1550  SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, Ops1, 3);
1551  Chain = Res.getValue(1);
1552  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
1553                             DAG.getIntPtrConstant(0, true), SDValue());
1554  SDValue Ops2[] = { Res, Chain };
1555  return DAG.getMergeValues(Ops2, 2, dl);
1556}
1557
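/// GetF64FormalArgument - Reassemble an f64 formal argument that was split
/// into two i32 pieces (two GPRs, or a GPR plus a stack slot) back into a
/// single f64 value using VMOVDRR.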
1558SDValue
1559ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
1560                                        SDValue &Root, SelectionDAG &DAG,
1561                                        DebugLoc dl) {
1562  MachineFunction &MF = DAG.getMachineFunction();
1563  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1564
1565  TargetRegisterClass *RC;
1566  if (AFI->isThumb1OnlyFunction())
1567    RC = ARM::tGPRRegisterClass;
1568  else
1569    RC = ARM::GPRRegisterClass;
1570
1571  // Transform the arguments stored in physical registers into virtual ones.
1572  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1573  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
1574
1575  SDValue ArgValue2;
1576  if (NextVA.isMemLoc()) {
1577    unsigned ArgSize = NextVA.getLocVT().getSizeInBits()/8;
1578    MachineFrameInfo *MFI = MF.getFrameInfo();
1579    int FI = MFI->CreateFixedObject(ArgSize, NextVA.getLocMemOffset(),
1580                                    true, false);
1581
1582    // Create load node to retrieve arguments from the stack.
1583    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1584    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
1585                            PseudoSourceValue::getFixedStack(FI), 0);
1586  } else {
1587    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
1588    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
1589  }
1590
1591  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
1592}
1593
1594SDValue
1595ARMTargetLowering::LowerFormalArguments(SDValue Chain,
1596                                        CallingConv::ID CallConv, bool isVarArg,
1597                                        const SmallVectorImpl<ISD::InputArg>
1598                                          &Ins,
1599                                        DebugLoc dl, SelectionDAG &DAG,
1600                                        SmallVectorImpl<SDValue> &InVals) {
1601
1602  MachineFunction &MF = DAG.getMachineFunction();
1603  MachineFrameInfo *MFI = MF.getFrameInfo();
1604
1605  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1606
1607  // Assign locations to all of the incoming arguments.
1608  SmallVector<CCValAssign, 16> ArgLocs;
1609  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
1610                 *DAG.getContext());
1611  CCInfo.AnalyzeFormalArguments(Ins,
1612                                CCAssignFnForNode(CallConv, /* Return*/ false,
1613                                                  isVarArg));
1614
1615  SmallVector<SDValue, 16> ArgValues;
1616
1617  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1618    CCValAssign &VA = ArgLocs[i];
1619
1620    // Arguments stored in registers.
1621    if (VA.isRegLoc()) {
1622      EVT RegVT = VA.getLocVT();
1623
1624      SDValue ArgValue;
1625      if (VA.needsCustom()) {
1626        // f64 and vector types are split up into multiple registers or
1627        // combinations of registers and stack slots.
1628        RegVT = MVT::i32;
1629
1630        if (VA.getLocVT() == MVT::v2f64) {
1631          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
1632                                                   Chain, DAG, dl);
1633          VA = ArgLocs[++i]; // skip ahead to next loc
1634          SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
1635                                                   Chain, DAG, dl);
1636          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
1637          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
1638                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
1639          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
1640                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
1641        } else
1642          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
1643
1644      } else {
1645        TargetRegisterClass *RC;
1646
1647        if (RegVT == MVT::f32)
1648          RC = ARM::SPRRegisterClass;
1649        else if (RegVT == MVT::f64)
1650          RC = ARM::DPRRegisterClass;
1651        else if (RegVT == MVT::v2f64)
1652          RC = ARM::QPRRegisterClass;
1653        else if (RegVT == MVT::i32)
1654          RC = (AFI->isThumb1OnlyFunction() ?
1655                ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
1656        else
1657          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
1658
1659        // Transform the arguments in physical registers into virtual ones.
1660        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1661        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1662      }
1663
1664      // If this is an 8 or 16-bit value, it is really passed promoted
1665      // to 32 bits.  Insert an assert[sz]ext to capture this, then
1666      // truncate to the right size.
1667      switch (VA.getLocInfo()) {
1668      default: llvm_unreachable("Unknown loc info!");
1669      case CCValAssign::Full: break;
1670      case CCValAssign::BCvt:
1671        ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
1672        break;
1673      case CCValAssign::SExt:
1674        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1675                               DAG.getValueType(VA.getValVT()));
1676        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1677        break;
1678      case CCValAssign::ZExt:
1679        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1680                               DAG.getValueType(VA.getValVT()));
1681        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1682        break;
1683      }
1684
1685      InVals.push_back(ArgValue);
1686
1687    } else { // VA.isRegLoc()
1688
1689      // sanity check
1690      assert(VA.isMemLoc());
1691      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
1692
1693      unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
1694      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
1695                                      true, false);
1696
1697      // Create load nodes to retrieve arguments from the stack.
1698      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1699      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
1700                                   PseudoSourceValue::getFixedStack(FI), 0));
1701    }
1702  }
1703
1704  // varargs
1705  if (isVarArg) {
1706    static const unsigned GPRArgRegs[] = {
1707      ARM::R0, ARM::R1, ARM::R2, ARM::R3
1708    };
1709
1710    unsigned NumGPRs = CCInfo.getFirstUnallocated
1711      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
1712
1713    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
1714    unsigned VARegSize = (4 - NumGPRs) * 4;
1715    unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
1716    unsigned ArgOffset = CCInfo.getNextStackOffset();
1717    if (VARegSaveSize) {
1718      // If this function is vararg, store any remaining integer argument regs
1719      // to their spots on the stack so that they may be loaded by dereferencing
1720      // the result of va_next.
1721      AFI->setVarArgsRegSaveSize(VARegSaveSize);
1722      VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
1723                                                 VARegSaveSize - VARegSize,
1724                                                 true, false);
1725      SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
1726
1727      SmallVector<SDValue, 4> MemOps;
1728      for (; NumGPRs < 4; ++NumGPRs) {
1729        TargetRegisterClass *RC;
1730        if (AFI->isThumb1OnlyFunction())
1731          RC = ARM::tGPRRegisterClass;
1732        else
1733          RC = ARM::GPRRegisterClass;
1734
1735        unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
1736        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1737        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
1738                        PseudoSourceValue::getFixedStack(VarArgsFrameIndex), 0);
1739        MemOps.push_back(Store);
1740        FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
1741                          DAG.getConstant(4, getPointerTy()));
1742      }
1743      if (!MemOps.empty())
1744        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1745                            &MemOps[0], MemOps.size());
1746    } else
1747      // This will point to the next argument passed via stack.
1748      VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset, true, false);
1749  }
1750
1751  return Chain;
1752}
1753
1754/// isFloatingPointZero - Return true if this is +0.0.
1755static bool isFloatingPointZero(SDValue Op) {
1756  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1757    return CFP->getValueAPF().isPosZero();
1758  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1759    // Maybe this has already been legalized into the constant pool?
1760    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
1761      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
1762      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
1763        if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1764          return CFP->getValueAPF().isPosZero();
1765    }
1766  }
1767  return false;
1768}
1769
1770/// Returns an appropriate ARM CMP (cmp) and corresponding condition code for
1771/// the given operands.
1772SDValue
1773ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
1774                             SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl) {
1775  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
1776    unsigned C = RHSC->getZExtValue();
1777    if (!isLegalICmpImmediate(C)) {
1778      // Constant does not fit, try adjusting it by one?
1779      switch (CC) {
1780      default: break;
1781      case ISD::SETLT:
1782      case ISD::SETGE:
1783        if (isLegalICmpImmediate(C-1)) {
1784          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
1785          RHS = DAG.getConstant(C-1, MVT::i32);
1786        }
1787        break;
1788      case ISD::SETULT:
1789      case ISD::SETUGE:
1790        if (C > 0 && isLegalICmpImmediate(C-1)) {
1791          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
1792          RHS = DAG.getConstant(C-1, MVT::i32);
1793        }
1794        break;
1795      case ISD::SETLE:
1796      case ISD::SETGT:
1797        if (isLegalICmpImmediate(C+1)) {
1798          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
1799          RHS = DAG.getConstant(C+1, MVT::i32);
1800        }
1801        break;
1802      case ISD::SETULE:
1803      case ISD::SETUGT:
1804        if (C < 0xffffffff && isLegalICmpImmediate(C+1)) {
1805          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1806          RHS = DAG.getConstant(C+1, MVT::i32);
1807        }
1808        break;
1809      }
1810    }
1811  }
1812
1813  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
1814  ARMISD::NodeType CompareType;
1815  switch (CondCode) {
1816  default:
1817    CompareType = ARMISD::CMP;
1818    break;
1819  case ARMCC::EQ:
1820  case ARMCC::NE:
1821    // Uses only Z Flag
1822    CompareType = ARMISD::CMPZ;
1823    break;
1824  }
1825  ARMCC = DAG.getConstant(CondCode, MVT::i32);
1826  return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
1827}
1828
1829/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
1830static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
1831                         DebugLoc dl) {
1832  SDValue Cmp;
1833  if (!isFloatingPointZero(RHS))
1834    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
1835  else
1836    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
1837  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
1838}
1839
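/// LowerSELECT_CC - Lower ISD::SELECT_CC to ARMISD::CMOV.  Integer selects
/// need a single compare and CMOV; FP condition codes that map to two ARM
/// condition codes need a second compare and CMOV.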
1840SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
1841  EVT VT = Op.getValueType();
1842  SDValue LHS = Op.getOperand(0);
1843  SDValue RHS = Op.getOperand(1);
1844  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1845  SDValue TrueVal = Op.getOperand(2);
1846  SDValue FalseVal = Op.getOperand(3);
1847  DebugLoc dl = Op.getDebugLoc();
1848
1849  if (LHS.getValueType() == MVT::i32) {
1850    SDValue ARMCC;
1851    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1852    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
1853    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
1854  }
1855
1856  ARMCC::CondCodes CondCode, CondCode2;
1857  FPCCToARMCC(CC, CondCode, CondCode2);
1858
1859  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
1860  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1861  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
1862  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
1863                                 ARMCC, CCR, Cmp);
1864  if (CondCode2 != ARMCC::AL) {
1865    SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
1866    // FIXME: Needs another CMP because flag can have but one use.
1867    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
1868    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
1869                         Result, TrueVal, ARMCC2, CCR, Cmp2);
1870  }
1871  return Result;
1872}
1873
1874SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
1875  SDValue  Chain = Op.getOperand(0);
1876  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1877  SDValue    LHS = Op.getOperand(2);
1878  SDValue    RHS = Op.getOperand(3);
1879  SDValue   Dest = Op.getOperand(4);
1880  DebugLoc dl = Op.getDebugLoc();
1881
1882  if (LHS.getValueType() == MVT::i32) {
1883    SDValue ARMCC;
1884    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1885    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
1886    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
1887                       Chain, Dest, ARMCC, CCR, Cmp);
1888  }
1889
1890  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
1891  ARMCC::CondCodes CondCode, CondCode2;
1892  FPCCToARMCC(CC, CondCode, CondCode2);
1893
1894  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
1895  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
1896  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1897  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
1898  SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
1899  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
1900  if (CondCode2 != ARMCC::AL) {
1901    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
1902    SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
1903    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
1904  }
1905  return Res;
1906}
1907
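/// LowerBR_JT - Lower a jump-table branch.  Thumb2 uses a two-level jump (see
/// below); otherwise the jump-table entry is loaded and, for PIC, added to
/// the table base before branching.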
1908SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) {
1909  SDValue Chain = Op.getOperand(0);
1910  SDValue Table = Op.getOperand(1);
1911  SDValue Index = Op.getOperand(2);
1912  DebugLoc dl = Op.getDebugLoc();
1913
1914  EVT PTy = getPointerTy();
1915  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
1916  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
1917  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
1918  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
1919  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
1920  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
1921  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
1922  if (Subtarget->isThumb2()) {
1923    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
1924    // which does another jump to the destination. This also makes it easier
1925    // to translate it to TBB / TBH later.
1926    // FIXME: This might not work if the function is extremely large.
1927    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
1928                       Addr, Op.getOperand(2), JTI, UId);
1929  }
1930  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
1931    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
1932                       PseudoSourceValue::getJumpTable(), 0);
1933    Chain = Addr.getValue(1);
1934    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
1935    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
1936  } else {
1937    Addr = DAG.getLoad(PTy, dl, Chain, Addr,
1938                       PseudoSourceValue::getJumpTable(), 0);
1939    Chain = Addr.getValue(1);
1940    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
1941  }
1942}
1943
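/// LowerFP_TO_INT - Lower fp-to-int conversion to a VFP FTOSI/FTOUI node
/// (which produces its result in an f32 register) followed by a bitcast to
/// i32.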
1944static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
1945  DebugLoc dl = Op.getDebugLoc();
1946  unsigned Opc =
1947    Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
1948  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
1949  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
1950}
1951
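/// LowerINT_TO_FP - Lower int-to-fp conversion by bitcasting the i32 operand
/// into an f32 register and converting it with a VFP SITOF/UITOF node.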
1952static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
1953  EVT VT = Op.getValueType();
1954  DebugLoc dl = Op.getDebugLoc();
1955  unsigned Opc =
1956    Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;
1957
1958  Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
1959  return DAG.getNode(Opc, dl, VT, Op);
1960}
1961
1962static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
1963  // Implement fcopysign with a fabs and a conditional fneg.
1964  SDValue Tmp0 = Op.getOperand(0);
1965  SDValue Tmp1 = Op.getOperand(1);
1966  DebugLoc dl = Op.getDebugLoc();
1967  EVT VT = Op.getValueType();
1968  EVT SrcVT = Tmp1.getValueType();
1969  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
1970  SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
1971  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
1972  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1973  return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
1974}
1975
1976SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
1977  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
1978  MFI->setFrameAddressIsTaken(true);
1979  EVT VT = Op.getValueType();
1980  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
1981  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
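  // Thumb and Darwin targets use R7 as the frame pointer; other ARM targets
  // use R11.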
1982  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
1983    ? ARM::R7 : ARM::R11;
1984  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
1985  while (Depth--)
1986    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0);
1987  return FrameAddr;
1988}
1989
1990SDValue
1991ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
1992                                           SDValue Chain,
1993                                           SDValue Dst, SDValue Src,
1994                                           SDValue Size, unsigned Align,
1995                                           bool AlwaysInline,
1996                                         const Value *DstSV, uint64_t DstSVOff,
1997                                         const Value *SrcSV, uint64_t SrcSVOff){
1998  // Do repeated 4-byte loads and stores. To be improved.
1999  // This requires 4-byte alignment.
2000  if ((Align & 3) != 0)
2001    return SDValue();
2002  // This requires the copy size to be a constant, preferably
2003  // within a subtarget-specific limit.
2004  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
2005  if (!ConstantSize)
2006    return SDValue();
2007  uint64_t SizeVal = ConstantSize->getZExtValue();
2008  if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
2009    return SDValue();
2010
2011  unsigned BytesLeft = SizeVal & 3;
2012  unsigned NumMemOps = SizeVal >> 2;
2013  unsigned EmittedNumMemOps = 0;
2014  EVT VT = MVT::i32;
2015  unsigned VTSize = 4;
2016  unsigned i = 0;
2017  const unsigned MAX_LOADS_IN_LDM = 6;
2018  SDValue TFOps[MAX_LOADS_IN_LDM];
2019  SDValue Loads[MAX_LOADS_IN_LDM];
2020  uint64_t SrcOff = 0, DstOff = 0;
2021
2022  // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
2023  // same number of stores.  The loads and stores will get combined into
2024  // ldm/stm later on.
2025  while (EmittedNumMemOps < NumMemOps) {
2026    for (i = 0;
2027         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
2028      Loads[i] = DAG.getLoad(VT, dl, Chain,
2029                             DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
2030                                         DAG.getConstant(SrcOff, MVT::i32)),
2031                             SrcSV, SrcSVOff + SrcOff);
2032      TFOps[i] = Loads[i].getValue(1);
2033      SrcOff += VTSize;
2034    }
2035    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
2036
2037    for (i = 0;
2038         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
2039      TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
2040                           DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
2041                                       DAG.getConstant(DstOff, MVT::i32)),
2042                           DstSV, DstSVOff + DstOff);
2043      DstOff += VTSize;
2044    }
2045    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
2046
2047    EmittedNumMemOps += i;
2048  }
2049
2050  if (BytesLeft == 0)
2051    return Chain;
2052
2053  // Issue loads / stores for the trailing 1 to 3 bytes.
2054  unsigned BytesLeftSave = BytesLeft;
2055  i = 0;
2056  while (BytesLeft) {
2057    if (BytesLeft >= 2) {
2058      VT = MVT::i16;
2059      VTSize = 2;
2060    } else {
2061      VT = MVT::i8;
2062      VTSize = 1;
2063    }
2064
2065    Loads[i] = DAG.getLoad(VT, dl, Chain,
2066                           DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
2067                                       DAG.getConstant(SrcOff, MVT::i32)),
2068                           SrcSV, SrcSVOff + SrcOff);
2069    TFOps[i] = Loads[i].getValue(1);
2070    ++i;
2071    SrcOff += VTSize;
2072    BytesLeft -= VTSize;
2073  }
2074  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
2075
2076  i = 0;
2077  BytesLeft = BytesLeftSave;
2078  while (BytesLeft) {
2079    if (BytesLeft >= 2) {
2080      VT = MVT::i16;
2081      VTSize = 2;
2082    } else {
2083      VT = MVT::i8;
2084      VTSize = 1;
2085    }
2086
2087    TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
2088                            DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
2089                                        DAG.getConstant(DstOff, MVT::i32)),
2090                            DstSV, DstSVOff + DstOff);
2091    ++i;
2092    DstOff += VTSize;
2093    BytesLeft -= VTSize;
2094  }
2095  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
2096}
2097
2098static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
2099  SDValue Op = N->getOperand(0);
2100  DebugLoc dl = N->getDebugLoc();
2101  if (N->getValueType(0) == MVT::f64) {
2102    // Turn i64->f64 into VMOVDRR.
2103    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
2104                             DAG.getConstant(0, MVT::i32));
2105    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
2106                             DAG.getConstant(1, MVT::i32));
2107    return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2108  }
2109
2110  // Turn f64->i64 into VMOVRRD.
2111  SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
2112                            DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
2113
2114  // Merge the pieces into a single i64 value.
2115  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
2116}
2117
2118/// getZeroVector - Returns a vector of specified type with all zero elements.
2119///
2120static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2121  assert(VT.isVector() && "Expected a vector type");
2122
2123  // Zero vectors are used to represent vector negation and in those cases
2124  // will be implemented with the NEON VNEG instruction.  However, VNEG does
2125  // not support i64 elements, so sometimes the zero vectors will need to be
2126  // explicitly constructed.  For those cases, and potentially other uses in
2127  // the future, always build zero vectors as <16 x i8> or <8 x i8> bitcasted
2128  // to their dest type.  This ensures they get CSE'd.
2129  SDValue Vec;
2130  SDValue Cst = DAG.getTargetConstant(0, MVT::i8);
2131  SmallVector<SDValue, 8> Ops;
2132  MVT TVT;
2133
2134  if (VT.getSizeInBits() == 64) {
2135    Ops.assign(8, Cst); TVT = MVT::v8i8;
2136  } else {
2137    Ops.assign(16, Cst); TVT = MVT::v16i8;
2138  }
2139  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
2140
2141  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
2142}
2143
2144/// getOnesVector - Returns a vector of specified type with all bits set.
2145///
2146static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2147  assert(VT.isVector() && "Expected a vector type");
2148
2149  // Always build ones vectors as <16 x i8> or <8 x i8> bitcasted to their
2150  // dest type. This ensures they get CSE'd.
2151  SDValue Vec;
2152  SDValue Cst = DAG.getTargetConstant(0xFF, MVT::i8);
2153  SmallVector<SDValue, 8> Ops;
2154  MVT TVT;
2155
2156  if (VT.getSizeInBits() == 64) {
2157    Ops.assign(8, Cst); TVT = MVT::v8i8;
2158  } else {
2159    Ops.assign(16, Cst); TVT = MVT::v16i8;
2160  }
2161  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
2162
2163  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
2164}
2165
2166/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
2167/// i32 values and take a 2 x i32 value to shift plus a shift amount.
2168SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) {
2169  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2170  EVT VT = Op.getValueType();
2171  unsigned VTBits = VT.getSizeInBits();
2172  DebugLoc dl = Op.getDebugLoc();
2173  SDValue ShOpLo = Op.getOperand(0);
2174  SDValue ShOpHi = Op.getOperand(1);
2175  SDValue ShAmt  = Op.getOperand(2);
2176  SDValue ARMCC;
2177  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2178
2179  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2180
2181  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2182                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
2183  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2184  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2185                                   DAG.getConstant(VTBits, MVT::i32));
2186  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2187  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2188  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2189
2190  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2191  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
2192                          ARMCC, DAG, dl);
2193  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2194  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC,
2195                           CCR, Cmp);
2196
2197  SDValue Ops[2] = { Lo, Hi };
2198  return DAG.getMergeValues(Ops, 2, dl);
2199}
2200
2201/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
2202/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
2203SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) {
2204  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2205  EVT VT = Op.getValueType();
2206  unsigned VTBits = VT.getSizeInBits();
2207  DebugLoc dl = Op.getDebugLoc();
2208  SDValue ShOpLo = Op.getOperand(0);
2209  SDValue ShOpHi = Op.getOperand(1);
2210  SDValue ShAmt  = Op.getOperand(2);
2211  SDValue ARMCC;
2212
2213  assert(Op.getOpcode() == ISD::SHL_PARTS);
2214  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2215                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
2216  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2217  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2218                                   DAG.getConstant(VTBits, MVT::i32));
2219  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2220  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2221
2222  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2223  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2224  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
2225                          ARMCC, DAG, dl);
2226  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2227  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
2228                           CCR, Cmp);
2229
2230  SDValue Ops[2] = { Lo, Hi };
2231  return DAG.getMergeValues(Ops, 2, dl);
2232}
2233
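/// LowerShift - Lower vector shifts to the NEON vshift intrinsics (right
/// shifts use a negated shift-amount vector), and lower i64 shifts right by
/// one to an SRL_FLAG/SRA_FLAG of the high word plus an RRX of the low word.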
2234static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
2235                          const ARMSubtarget *ST) {
2236  EVT VT = N->getValueType(0);
2237  DebugLoc dl = N->getDebugLoc();
2238
2239  // Lower vector shifts on NEON to use VSHL.
2240  if (VT.isVector()) {
2241    assert(ST->hasNEON() && "unexpected vector shift");
2242
2243    // Left shifts translate directly to the vshiftu intrinsic.
2244    if (N->getOpcode() == ISD::SHL)
2245      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2246                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
2247                         N->getOperand(0), N->getOperand(1));
2248
2249    assert((N->getOpcode() == ISD::SRA ||
2250            N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
2251
2252    // NEON uses the same intrinsics for both left and right shifts.  For
2253    // right shifts, the shift amounts are negative, so negate the vector of
2254    // shift amounts.
2255    EVT ShiftVT = N->getOperand(1).getValueType();
2256    SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
2257                                       getZeroVector(ShiftVT, DAG, dl),
2258                                       N->getOperand(1));
2259    Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
2260                               Intrinsic::arm_neon_vshifts :
2261                               Intrinsic::arm_neon_vshiftu);
2262    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2263                       DAG.getConstant(vshiftInt, MVT::i32),
2264                       N->getOperand(0), NegatedCount);
2265  }
2266
2267  // We can get here for a node like i32 = ISD::SHL i32, i64
2268  if (VT != MVT::i64)
2269    return SDValue();
2270
2271  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
2272         "Unknown shift to lower!");
2273
2274  // We only lower SRA, SRL of 1 here, all others use generic lowering.
2275  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
2276      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
2277    return SDValue();
2278
2279  // If we are in Thumb1 mode, we don't have RRX.
2280  if (ST->isThumb1Only()) return SDValue();
2281
2282  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
2283  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
2284                             DAG.getConstant(0, MVT::i32));
2285  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
2286                             DAG.getConstant(1, MVT::i32));
2287
2288  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one
2289  // and captures the shifted-out bit in the carry flag.
2290  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
2291  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
2292
2293  // The low part is an ARMISD::RRX operation, which shifts the carry in.
2294  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
2295
2296  // Merge the pieces into a single i64 value.
2297  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
2298}
2299
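/// LowerVSETCC - Lower a vector setcc to the NEON compare nodes (VCEQ, VCGT,
/// VCGE, VCGTU, VCGEU, VTST), swapping operands and/or inverting the result
/// to cover the remaining condition codes.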
2300static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
2301  SDValue TmpOp0, TmpOp1;
2302  bool Invert = false;
2303  bool Swap = false;
2304  unsigned Opc = 0;
2305
2306  SDValue Op0 = Op.getOperand(0);
2307  SDValue Op1 = Op.getOperand(1);
2308  SDValue CC = Op.getOperand(2);
2309  EVT VT = Op.getValueType();
2310  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
2311  DebugLoc dl = Op.getDebugLoc();
2312
2313  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
2314    switch (SetCCOpcode) {
2315    default: llvm_unreachable("Illegal FP comparison"); break;
2316    case ISD::SETUNE:
2317    case ISD::SETNE:  Invert = true; // Fallthrough
2318    case ISD::SETOEQ:
2319    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
2320    case ISD::SETOLT:
2321    case ISD::SETLT: Swap = true; // Fallthrough
2322    case ISD::SETOGT:
2323    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
2324    case ISD::SETOLE:
2325    case ISD::SETLE:  Swap = true; // Fallthrough
2326    case ISD::SETOGE:
2327    case ISD::SETGE: Opc = ARMISD::VCGE; break;
2328    case ISD::SETUGE: Swap = true; // Fallthrough
2329    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
2330    case ISD::SETUGT: Swap = true; // Fallthrough
2331    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
2332    case ISD::SETUEQ: Invert = true; // Fallthrough
2333    case ISD::SETONE:
2334      // Expand this to (OLT | OGT).
2335      TmpOp0 = Op0;
2336      TmpOp1 = Op1;
2337      Opc = ISD::OR;
2338      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
2339      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
2340      break;
2341    case ISD::SETUO: Invert = true; // Fallthrough
2342    case ISD::SETO:
2343      // Expand this to (OLT | OGE).
2344      TmpOp0 = Op0;
2345      TmpOp1 = Op1;
2346      Opc = ISD::OR;
2347      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
2348      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
2349      break;
2350    }
2351  } else {
2352    // Integer comparisons.
2353    switch (SetCCOpcode) {
2354    default: llvm_unreachable("Illegal integer comparison"); break;
2355    case ISD::SETNE:  Invert = true;
2356    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
2357    case ISD::SETLT:  Swap = true;
2358    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
2359    case ISD::SETLE:  Swap = true;
2360    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
2361    case ISD::SETULT: Swap = true;
2362    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
2363    case ISD::SETULE: Swap = true;
2364    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
2365    }
2366
2367    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
2368    if (Opc == ARMISD::VCEQ) {
2369
2370      SDValue AndOp;
2371      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
2372        AndOp = Op0;
2373      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
2374        AndOp = Op1;
2375
2376      // Ignore bitconvert.
2377      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
2378        AndOp = AndOp.getOperand(0);
2379
2380      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
2381        Opc = ARMISD::VTST;
2382        Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
2383        Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
2384        Invert = !Invert;
2385      }
2386    }
2387  }
2388
2389  if (Swap)
2390    std::swap(Op0, Op1);
2391
2392  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
2393
2394  if (Invert)
2395    Result = DAG.getNOT(dl, Result, VT);
2396
2397  return Result;
2398}
2399
2400/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
2401/// VMOV instruction, and if so, return the constant being splatted.
2402static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
2403                           unsigned SplatBitSize, SelectionDAG &DAG) {
2404  switch (SplatBitSize) {
2405  case 8:
2406    // Any 1-byte value is OK.
2407    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
2408    return DAG.getTargetConstant(SplatBits, MVT::i8);
2409
2410  case 16:
2411    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
2412    if ((SplatBits & ~0xff) == 0 ||
2413        (SplatBits & ~0xff00) == 0)
2414      return DAG.getTargetConstant(SplatBits, MVT::i16);
2415    break;
2416
2417  case 32:
2418    // NEON's 32-bit VMOV supports splat values where:
2419    // * only one byte is nonzero, or
2420    // * the least significant byte is 0xff and the second byte is nonzero, or
2421    // * the least significant 2 bytes are 0xff and the third is nonzero.
2422    if ((SplatBits & ~0xff) == 0 ||
2423        (SplatBits & ~0xff00) == 0 ||
2424        (SplatBits & ~0xff0000) == 0 ||
2425        (SplatBits & ~0xff000000) == 0)
2426      return DAG.getTargetConstant(SplatBits, MVT::i32);
2427
2428    if ((SplatBits & ~0xffff) == 0 &&
2429        ((SplatBits | SplatUndef) & 0xff) == 0xff)
2430      return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
2431
2432    if ((SplatBits & ~0xffffff) == 0 &&
2433        ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
2434      return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
2435
2436    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
2437    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
2438    // VMOV.I32.  A (very) minor optimization would be to replicate the value
2439    // and fall through here to test for a valid 64-bit splat.  But, then the
2440    // caller would also need to check and handle the change in size.
2441    break;
2442
2443  case 64: {
2444    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
2445    uint64_t BitMask = 0xff;
2446    uint64_t Val = 0;
2447    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
2448      if (((SplatBits | SplatUndef) & BitMask) == BitMask)
2449        Val |= BitMask;
2450      else if ((SplatBits & BitMask) != 0)
2451        return SDValue();
2452      BitMask <<= 8;
2453    }
2454    return DAG.getTargetConstant(Val, MVT::i64);
2455  }
2456
2457  default:
2458    llvm_unreachable("unexpected size for isVMOVSplat");
2459    break;
2460  }
2461
2462  return SDValue();
2463}
2464
2465/// getVMOVImm - If this is a build_vector of constants which can be
2466/// formed by using a VMOV instruction of the specified element size,
2467/// return the constant being splatted.  The ByteSize field indicates the
2468/// number of bytes of each element (1, 2, 4, or 8).
2469SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2470  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
2471  APInt SplatBits, SplatUndef;
2472  unsigned SplatBitSize;
2473  bool HasAnyUndefs;
2474  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
2475                                      HasAnyUndefs, ByteSize * 8))
2476    return SDValue();
2477
2478  if (SplatBitSize > ByteSize * 8)
2479    return SDValue();
2480
2481  return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
2482                     SplatBitSize, DAG);
2483}
2484
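/// isVEXTMask - Check if a vector shuffle corresponds to a VEXT instruction,
/// which extracts a contiguous run of elements from the concatenation of the
/// two source vectors.  ReverseVEXT is set if the operands must be swapped;
/// Imm receives the starting element index.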
2485static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
2486                       bool &ReverseVEXT, unsigned &Imm) {
2487  unsigned NumElts = VT.getVectorNumElements();
2488  ReverseVEXT = false;
2489  Imm = M[0];
2490
2491  // If this is a VEXT shuffle, the immediate value is the index of the first
2492  // element.  The other shuffle indices must be the successive elements after
2493  // the first one.
2494  unsigned ExpectedElt = Imm;
2495  for (unsigned i = 1; i < NumElts; ++i) {
2496    // Increment the expected index.  If it wraps around, it may still be
2497    // a VEXT but the source vectors must be swapped.
2498    ExpectedElt += 1;
2499    if (ExpectedElt == NumElts * 2) {
2500      ExpectedElt = 0;
2501      ReverseVEXT = true;
2502    }
2503
2504    if (ExpectedElt != static_cast<unsigned>(M[i]))
2505      return false;
2506  }
2507
2508  // Adjust the index value if the source operands will be swapped.
2509  if (ReverseVEXT)
2510    Imm -= NumElts;
2511
2512  return true;
2513}
2514
2515/// isVREVMask - Check if a vector shuffle corresponds to a VREV
2516/// instruction with the specified blocksize.  (The order of the elements
2517/// within each block of the vector is reversed.)
2518static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
2519                       unsigned BlockSize) {
2520  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
2521         "Only possible block sizes for VREV are: 16, 32, 64");
2522
2523  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2524  if (EltSz == 64)
2525    return false;
2526
2527  unsigned NumElts = VT.getVectorNumElements();
2528  unsigned BlockElts = M[0] + 1;
2529
2530  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
2531    return false;
2532
2533  for (unsigned i = 0; i < NumElts; ++i) {
2534    if ((unsigned) M[i] !=
2535        (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
2536      return false;
2537  }
2538
2539  return true;
2540}
2541
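/// isVTRNMask - Check if a vector shuffle corresponds to a VTRN (vector
/// transpose) instruction.  WhichResult is set to 0 or 1 depending on which
/// of the two VTRN results the mask matches.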
2542static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
2543                       unsigned &WhichResult) {
2544  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2545  if (EltSz == 64)
2546    return false;
2547
2548  unsigned NumElts = VT.getVectorNumElements();
2549  WhichResult = (M[0] == 0 ? 0 : 1);
2550  for (unsigned i = 0; i < NumElts; i += 2) {
2551    if ((unsigned) M[i] != i + WhichResult ||
2552        (unsigned) M[i+1] != i + NumElts + WhichResult)
2553      return false;
2554  }
2555  return true;
2556}
2557
2558/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
2559/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
2560/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
2561static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
2562                                unsigned &WhichResult) {
2563  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2564  if (EltSz == 64)
2565    return false;
2566
2567  unsigned NumElts = VT.getVectorNumElements();
2568  WhichResult = (M[0] == 0 ? 0 : 1);
2569  for (unsigned i = 0; i < NumElts; i += 2) {
2570    if ((unsigned) M[i] != i + WhichResult ||
2571        (unsigned) M[i+1] != i + WhichResult)
2572      return false;
2573  }
2574  return true;
2575}
2576
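/// isVUZPMask - Check if a vector shuffle mask matches one result of a VUZP
/// (unzip) operation; e.g., <0, 2, 4, 6> for the first result or <1, 3, 5, 7>
/// for the second result of a 4-element VUZP.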
2577static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
2578                       unsigned &WhichResult) {
2579  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2580  if (EltSz == 64)
2581    return false;
2582
2583  unsigned NumElts = VT.getVectorNumElements();
2584  WhichResult = (M[0] == 0 ? 0 : 1);
2585  for (unsigned i = 0; i != NumElts; ++i) {
2586    if ((unsigned) M[i] != 2 * i + WhichResult)
2587      return false;
2588  }
2589
2590  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2591  if (VT.is64BitVector() && EltSz == 32)
2592    return false;
2593
2594  return true;
2595}
2596
2597/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
2598/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
2599/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
2600static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
2601                                unsigned &WhichResult) {
2602  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2603  if (EltSz == 64)
2604    return false;
2605
2606  unsigned Half = VT.getVectorNumElements() / 2;
2607  WhichResult = (M[0] == 0 ? 0 : 1);
2608  for (unsigned j = 0; j != 2; ++j) {
2609    unsigned Idx = WhichResult;
2610    for (unsigned i = 0; i != Half; ++i) {
2611      if ((unsigned) M[i + j * Half] != Idx)
2612        return false;
2613      Idx += 2;
2614    }
2615  }
2616
2617  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2618  if (VT.is64BitVector() && EltSz == 32)
2619    return false;
2620
2621  return true;
2622}
2623
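/// isVZIPMask - Check if a vector shuffle mask matches one result of a VZIP
/// (interleave) operation; e.g., <0, 4, 1, 5> for the first result or
/// <2, 6, 3, 7> for the second result of a 4-element VZIP.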
2624static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
2625                       unsigned &WhichResult) {
2626  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2627  if (EltSz == 64)
2628    return false;
2629
2630  unsigned NumElts = VT.getVectorNumElements();
2631  WhichResult = (M[0] == 0 ? 0 : 1);
2632  unsigned Idx = WhichResult * NumElts / 2;
2633  for (unsigned i = 0; i != NumElts; i += 2) {
2634    if ((unsigned) M[i] != Idx ||
2635        (unsigned) M[i+1] != Idx + NumElts)
2636      return false;
2637    Idx += 1;
2638  }
2639
2640  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2641  if (VT.is64BitVector() && EltSz == 32)
2642    return false;
2643
2644  return true;
2645}
2646
2647/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
2648/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
2649/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
2650static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
2651                                unsigned &WhichResult) {
2652  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
2653  if (EltSz == 64)
2654    return false;
2655
2656  unsigned NumElts = VT.getVectorNumElements();
2657  WhichResult = (M[0] == 0 ? 0 : 1);
2658  unsigned Idx = WhichResult * NumElts / 2;
2659  for (unsigned i = 0; i != NumElts; i += 2) {
2660    if ((unsigned) M[i] != Idx ||
2661        (unsigned) M[i+1] != Idx)
2662      return false;
2663    Idx += 1;
2664  }
2665
2666  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
2667  if (VT.is64BitVector() && EltSz == 32)
2668    return false;
2669
2670  return true;
2671}
2672
2673
2674static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
2675  // Canonicalize all-zeros and all-ones vectors.
2676  ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
2677  if (ConstVal->isNullValue())
2678    return getZeroVector(VT, DAG, dl);
2679  if (ConstVal->isAllOnesValue())
2680    return getOnesVector(VT, DAG, dl);
2681
2682  EVT CanonicalVT;
2683  if (VT.is64BitVector()) {
2684    switch (Val.getValueType().getSizeInBits()) {
2685    case 8:  CanonicalVT = MVT::v8i8; break;
2686    case 16: CanonicalVT = MVT::v4i16; break;
2687    case 32: CanonicalVT = MVT::v2i32; break;
2688    case 64: CanonicalVT = MVT::v1i64; break;
2689    default: llvm_unreachable("unexpected splat element type"); break;
2690    }
2691  } else {
2692    assert(VT.is128BitVector() && "unknown splat vector size");
2693    switch (Val.getValueType().getSizeInBits()) {
2694    case 8:  CanonicalVT = MVT::v16i8; break;
2695    case 16: CanonicalVT = MVT::v8i16; break;
2696    case 32: CanonicalVT = MVT::v4i32; break;
2697    case 64: CanonicalVT = MVT::v2i64; break;
2698    default: llvm_unreachable("unexpected splat element type"); break;
2699    }
2700  }
2701
2702  // Build a canonical splat for this value.
2703  SmallVector<SDValue, 8> Ops;
2704  Ops.assign(CanonicalVT.getVectorNumElements(), Val);
2705  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
2706                            Ops.size());
2707  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
2708}
2709
2710// If this is a case we can't handle, return null and let the default
2711// expansion code take care of it.
2712static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
2713  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
2714  DebugLoc dl = Op.getDebugLoc();
2715  EVT VT = Op.getValueType();
2716
2717  APInt SplatBits, SplatUndef;
2718  unsigned SplatBitSize;
2719  bool HasAnyUndefs;
2720  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
2721    if (SplatBitSize <= 64) {
2722      SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
2723                                SplatUndef.getZExtValue(), SplatBitSize, DAG);
2724      if (Val.getNode())
2725        return BuildSplat(Val, VT, DAG, dl);
2726    }
2727  }
2728
2729  // If there are only 2 elements in a 128-bit vector, insert them into an
2730  // undef vector.  This handles the common case for 128-bit vector argument
2731  // passing, where the insertions should be translated to subreg accesses
2732  // with no real instructions.
2733  if (VT.is128BitVector() && Op.getNumOperands() == 2) {
2734    SDValue Val = DAG.getUNDEF(VT);
2735    SDValue Op0 = Op.getOperand(0);
2736    SDValue Op1 = Op.getOperand(1);
2737    if (Op0.getOpcode() != ISD::UNDEF)
2738      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
2739                        DAG.getIntPtrConstant(0));
2740    if (Op1.getOpcode() != ISD::UNDEF)
2741      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
2742                        DAG.getIntPtrConstant(1));
2743    return Val;
2744  }
2745
2746  return SDValue();
2747}
2748
2749/// isShuffleMaskLegal - Targets can use this to indicate that they only
2750/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
2751/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
2752/// are assumed to be legal.
2753bool
2754ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
2755                                      EVT VT) const {
2756  if (VT.getVectorNumElements() == 4 &&
2757      (VT.is128BitVector() || VT.is64BitVector())) {
2758    unsigned PFIndexes[4];
2759    for (unsigned i = 0; i != 4; ++i) {
2760      if (M[i] < 0)
2761        PFIndexes[i] = 8;
2762      else
2763        PFIndexes[i] = M[i];
2764    }
2765
2766    // Compute the index in the perfect shuffle table.
2767    unsigned PFTableIndex =
2768      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
2769    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
2770    unsigned Cost = (PFEntry >> 30);
2771
2772    if (Cost <= 4)
2773      return true;
2774  }
2775
2776  bool ReverseVEXT;
2777  unsigned Imm, WhichResult;
2778
2779  return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
2780          isVREVMask(M, VT, 64) ||
2781          isVREVMask(M, VT, 32) ||
2782          isVREVMask(M, VT, 16) ||
2783          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
2784          isVTRNMask(M, VT, WhichResult) ||
2785          isVUZPMask(M, VT, WhichResult) ||
2786          isVZIPMask(M, VT, WhichResult) ||
2787          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
2788          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
2789          isVZIP_v_undef_Mask(M, VT, WhichResult));
2790}
2791
2792/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
2793/// the specified operations to build the shuffle.
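/// Each PerfectShuffleTable entry packs a cost into bits [31:30], an opcode
/// into bits [29:26], and the 13-bit IDs of the left and right sub-shuffles
/// into bits [25:13] and [12:0].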
2794static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
2795                                      SDValue RHS, SelectionDAG &DAG,
2796                                      DebugLoc dl) {
2797  unsigned OpNum = (PFEntry >> 26) & 0x0F;
2798  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
2799  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
2800
2801  enum {
2802    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
2803    OP_VREV,
2804    OP_VDUP0,
2805    OP_VDUP1,
2806    OP_VDUP2,
2807    OP_VDUP3,
2808    OP_VEXT1,
2809    OP_VEXT2,
2810    OP_VEXT3,
2811    OP_VUZPL, // VUZP, left result
2812    OP_VUZPR, // VUZP, right result
2813    OP_VZIPL, // VZIP, left result
2814    OP_VZIPR, // VZIP, right result
2815    OP_VTRNL, // VTRN, left result
2816    OP_VTRNR  // VTRN, right result
2817  };
2818
2819  if (OpNum == OP_COPY) {
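  // For OP_COPY, an LHSID that encodes <0,1,2,3> in base 9 means the result is
  // simply LHS; the only other legal encoding is <4,5,6,7>, which selects RHS.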
2820    if (LHSID == (1*9+2)*9+3) return LHS;
2821    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
2822    return RHS;
2823  }
2824
2825  SDValue OpLHS, OpRHS;
2826  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
2827  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
2828  EVT VT = OpLHS.getValueType();
2829
2830  switch (OpNum) {
2831  default: llvm_unreachable("Unknown shuffle opcode!");
2832  case OP_VREV:
2833    return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
2834  case OP_VDUP0:
2835  case OP_VDUP1:
2836  case OP_VDUP2:
2837  case OP_VDUP3:
2838    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
2839                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
2840  case OP_VEXT1:
2841  case OP_VEXT2:
2842  case OP_VEXT3:
2843    return DAG.getNode(ARMISD::VEXT, dl, VT,
2844                       OpLHS, OpRHS,
2845                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
2846  case OP_VUZPL:
2847  case OP_VUZPR:
2848    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
2849                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
2850  case OP_VZIPL:
2851  case OP_VZIPR:
2852    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
2853                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
2854  case OP_VTRNL:
2855  case OP_VTRNR:
2856    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
2857                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
2858  }
2859}
2860
2861static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
2862  SDValue V1 = Op.getOperand(0);
2863  SDValue V2 = Op.getOperand(1);
2864  DebugLoc dl = Op.getDebugLoc();
2865  EVT VT = Op.getValueType();
2866  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2867  SmallVector<int, 8> ShuffleMask;
2868
2869  // Convert shuffles that are directly supported on NEON to target-specific
2870  // DAG nodes, instead of keeping them as shuffles and matching them again
2871  // during code selection.  This is more efficient and avoids the possibility
2872  // of inconsistencies between legalization and selection.
2873  // FIXME: floating-point vectors should be canonicalized to integer vectors
2874  // of the same time so that they get CSEd properly.
2875  SVN->getMask(ShuffleMask);
2876
2877  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
2878    int Lane = SVN->getSplatIndex();
2879    // If this is undef splat, generate it via "just" vdup, if possible.
2880    if (Lane == -1) Lane = 0;
2881
2882    if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
2883      return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
2884    }
2885    return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
2886                       DAG.getConstant(Lane, MVT::i32));
2887  }
2888
2889  bool ReverseVEXT;
2890  unsigned Imm;
2891  if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
2892    if (ReverseVEXT)
2893      std::swap(V1, V2);
2894    return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
2895                       DAG.getConstant(Imm, MVT::i32));
2896  }
2897
2898  if (isVREVMask(ShuffleMask, VT, 64))
2899    return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
2900  if (isVREVMask(ShuffleMask, VT, 32))
2901    return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
2902  if (isVREVMask(ShuffleMask, VT, 16))
2903    return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
2904
2905  // Check for Neon shuffles that modify both input vectors in place.
2906  // If both results are used, i.e., if there are two shuffles with the same
2907  // source operands and with masks corresponding to both results of one of
2908  // these operations, DAG memoization will ensure that a single node is
2909  // used for both shuffles.
2910  unsigned WhichResult;
2911  if (isVTRNMask(ShuffleMask, VT, WhichResult))
2912    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
2913                       V1, V2).getValue(WhichResult);
2914  if (isVUZPMask(ShuffleMask, VT, WhichResult))
2915    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
2916                       V1, V2).getValue(WhichResult);
2917  if (isVZIPMask(ShuffleMask, VT, WhichResult))
2918    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
2919                       V1, V2).getValue(WhichResult);
2920
2921  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
2922    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
2923                       V1, V1).getValue(WhichResult);
2924  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
2925    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
2926                       V1, V1).getValue(WhichResult);
2927  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
2928    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
2929                       V1, V1).getValue(WhichResult);
2930
2931  // If the shuffle is not directly supported and it has 4 elements, use
2932  // the PerfectShuffle-generated table to synthesize it from other shuffles.
2933  if (VT.getVectorNumElements() == 4 &&
2934      (VT.is128BitVector() || VT.is64BitVector())) {
2935    unsigned PFIndexes[4];
2936    for (unsigned i = 0; i != 4; ++i) {
2937      if (ShuffleMask[i] < 0)
2938        PFIndexes[i] = 8;
2939      else
2940        PFIndexes[i] = ShuffleMask[i];
2941    }
2942
2943    // Compute the index in the perfect shuffle table.
2944    unsigned PFTableIndex =
2945      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
2946
2947    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
2948    unsigned Cost = (PFEntry >> 30);
2949
2950    if (Cost <= 4)
2951      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
2952  }
2953
2954  return SDValue();
2955}
2956
2957static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
2958  EVT VT = Op.getValueType();
2959  DebugLoc dl = Op.getDebugLoc();
2960  SDValue Vec = Op.getOperand(0);
2961  SDValue Lane = Op.getOperand(1);
2962  assert(VT == MVT::i32 &&
2963         Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
2964         "unexpected type for custom-lowering vector extract");
2965  return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
2966}
2967
2968static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
2969  // The only time a CONCAT_VECTORS operation can have legal types is when
2970  // two 64-bit vectors are concatenated to a 128-bit vector.
2971  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
2972         "unexpected CONCAT_VECTORS");
2973  DebugLoc dl = Op.getDebugLoc();
2974  SDValue Val = DAG.getUNDEF(MVT::v2f64);
2975  SDValue Op0 = Op.getOperand(0);
2976  SDValue Op1 = Op.getOperand(1);
2977  if (Op0.getOpcode() != ISD::UNDEF)
2978    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
2979                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
2980                      DAG.getIntPtrConstant(0));
2981  if (Op1.getOpcode() != ISD::UNDEF)
2982    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
2983                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
2984                      DAG.getIntPtrConstant(1));
2985  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
2986}
2987
2988SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
2989  switch (Op.getOpcode()) {
2990  default: llvm_unreachable("Don't know how to custom lower this!");
2991  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
2992  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
2993  case ISD::GlobalAddress:
2994    return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
2995      LowerGlobalAddressELF(Op, DAG);
2996  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
2997  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
2998  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
2999  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
3000  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3001  case ISD::VASTART:       return LowerVASTART(Op, DAG, VarArgsFrameIndex);
3002  case ISD::MEMBARRIER:    return LowerMEMBARRIER(Op, DAG, Subtarget);
3003  case ISD::SINT_TO_FP:
3004  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
3005  case ISD::FP_TO_SINT:
3006  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
3007  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
3008  case ISD::RETURNADDR:    break;
3009  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
3010  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3011  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3012  case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
3013  case ISD::SHL:
3014  case ISD::SRL:
3015  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
3016  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
3017  case ISD::SRL_PARTS:
3018  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
3019  case ISD::VSETCC:        return LowerVSETCC(Op, DAG);
3020  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG);
3021  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3022  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3023  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3024  }
3025  return SDValue();
3026}
3027
3028/// ReplaceNodeResults - Replace the results of node with an illegal result
3029/// type with new values built out of custom code.
3030void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
3031                                           SmallVectorImpl<SDValue>&Results,
3032                                           SelectionDAG &DAG) {
3033  switch (N->getOpcode()) {
3034  default:
3035    llvm_unreachable("Don't know how to custom expand this!");
3036    return;
3037  case ISD::BIT_CONVERT:
3038    Results.push_back(ExpandBIT_CONVERT(N, DAG));
3039    return;
3040  case ISD::SRL:
3041  case ISD::SRA: {
3042    SDValue Res = LowerShift(N, DAG, Subtarget);
3043    if (Res.getNode())
3044      Results.push_back(Res);
3045    return;
3046  }
3047  }
3048}
3049
3050//===----------------------------------------------------------------------===//
3051//                           ARM Scheduler Hooks
3052//===----------------------------------------------------------------------===//
3053
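/// EmitAtomicCmpSwap - Expand an ATOMIC_CMP_SWAP_I* pseudo instruction into an
/// LDREX / STREX compare-and-swap loop; the block comments below show the
/// generated code.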
3054MachineBasicBlock *
3055ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
3056                                     MachineBasicBlock *BB,
3057                                     unsigned Size) const {
3058  unsigned dest    = MI->getOperand(0).getReg();
3059  unsigned ptr     = MI->getOperand(1).getReg();
3060  unsigned oldval  = MI->getOperand(2).getReg();
3061  unsigned newval  = MI->getOperand(3).getReg();
3062  unsigned scratch = BB->getParent()->getRegInfo()
3063    .createVirtualRegister(ARM::GPRRegisterClass);
3064  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3065  DebugLoc dl = MI->getDebugLoc();
3066  bool isThumb2 = Subtarget->isThumb2();
3067
3068  unsigned ldrOpc, strOpc;
3069  switch (Size) {
3070  default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
3071  case 1:
3072    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
3073    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
3074    break;
3075  case 2:
3076    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
3077    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
3078    break;
3079  case 4:
3080    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
3081    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
3082    break;
3083  }
3084
3085  MachineFunction *MF = BB->getParent();
3086  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3087  MachineFunction::iterator It = BB;
3088  ++It; // insert the new blocks after the current block
3089
3090  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
3091  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
3092  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3093  MF->insert(It, loop1MBB);
3094  MF->insert(It, loop2MBB);
3095  MF->insert(It, exitMBB);
3096  exitMBB->transferSuccessors(BB);
3097
3098  //  thisMBB:
3099  //   ...
3100  //   fallthrough --> loop1MBB
3101  BB->addSuccessor(loop1MBB);
3102
3103  // loop1MBB:
3104  //   ldrex dest, [ptr]
3105  //   cmp dest, oldval
3106  //   bne exitMBB
3107  BB = loop1MBB;
3108  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
3109  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
3110                 .addReg(dest).addReg(oldval));
3111  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3112    .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3113  BB->addSuccessor(loop2MBB);
3114  BB->addSuccessor(exitMBB);
3115
3116  // loop2MBB:
3117  //   strex scratch, newval, [ptr]
3118  //   cmp scratch, #0
3119  //   bne loop1MBB
3120  BB = loop2MBB;
3121  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
3122                 .addReg(ptr));
3123  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
3124                 .addReg(scratch).addImm(0));
3125  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3126    .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3127  BB->addSuccessor(loop1MBB);
3128  BB->addSuccessor(exitMBB);
3129
3130  //  exitMBB:
3131  //   ...
3132  BB = exitMBB;
3133
3134  MF->DeleteMachineInstr(MI);   // The instruction is gone now.
3135
3136  return BB;
3137}
3138
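/// EmitAtomicBinary - Expand an atomic read-modify-write pseudo instruction
/// (or an ATOMIC_SWAP_I* pseudo, when BinOpcode is 0) into an
/// LDREX / <binop> / STREX retry loop.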
3139MachineBasicBlock *
3140ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
3141                                    unsigned Size, unsigned BinOpcode) const {
3142  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3143  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3144
3145  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3146  MachineFunction *F = BB->getParent();
3147  MachineFunction::iterator It = BB;
3148  ++It;
3149
3150  unsigned dest = MI->getOperand(0).getReg();
3151  unsigned ptr = MI->getOperand(1).getReg();
3152  unsigned incr = MI->getOperand(2).getReg();
3153  DebugLoc dl = MI->getDebugLoc();
3154
3155  bool isThumb2 = Subtarget->isThumb2();
3156  unsigned ldrOpc, strOpc;
3157  switch (Size) {
3158  default: llvm_unreachable("unsupported size for AtomicBinary!");
3159  case 1:
3160    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
3161    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
3162    break;
3163  case 2:
3164    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
3165    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
3166    break;
3167  case 4:
3168    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
3169    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
3170    break;
3171  }
3172
3173  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
3174  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
3175  F->insert(It, loopMBB);
3176  F->insert(It, exitMBB);
3177  exitMBB->transferSuccessors(BB);
3178
3179  MachineRegisterInfo &RegInfo = F->getRegInfo();
3180  unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
3181  unsigned scratch2 = (!BinOpcode) ? incr :
3182    RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
3183
3184  //  thisMBB:
3185  //   ...
3186  //   fallthrough --> loopMBB
3187  BB->addSuccessor(loopMBB);
3188
3189  //  loopMBB:
3190  //   ldrex dest, ptr
3191  //   <binop> scratch2, dest, incr
3192  //   strex scratch, scratch2, ptr
3193  //   cmp scratch, #0
3194  //   bne- loopMBB
3195  //   fallthrough --> exitMBB
3196  BB = loopMBB;
3197  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
3198  if (BinOpcode) {
3199    // operand order needs to go the other way for NAND
3200    if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
3201      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
3202                     addReg(incr).addReg(dest)).addReg(0);
3203    else
3204      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
3205                     addReg(dest).addReg(incr)).addReg(0);
3206  }
3207
3208  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2)
3209                 .addReg(ptr));
3210  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
3211                 .addReg(scratch).addImm(0));
3212  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3213    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3214
3215  BB->addSuccessor(loopMBB);
3216  BB->addSuccessor(exitMBB);
3217
3218  //  exitMBB:
3219  //   ...
3220  BB = exitMBB;
3221
3222  F->DeleteMachineInstr(MI);   // The instruction is gone now.
3223
3224  return BB;
3225}
3226
3227MachineBasicBlock *
3228ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
3229                                               MachineBasicBlock *BB,
3230                   DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
3231  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3232  DebugLoc dl = MI->getDebugLoc();
3233  bool isThumb2 = Subtarget->isThumb2();
3234  switch (MI->getOpcode()) {
3235  default:
3236    MI->dump();
3237    llvm_unreachable("Unexpected instr type to insert");
3238
3239  case ARM::ATOMIC_LOAD_ADD_I8:
3240     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
3241  case ARM::ATOMIC_LOAD_ADD_I16:
3242     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
3243  case ARM::ATOMIC_LOAD_ADD_I32:
3244     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
3245
3246  case ARM::ATOMIC_LOAD_AND_I8:
3247     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
3248  case ARM::ATOMIC_LOAD_AND_I16:
3249     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
3250  case ARM::ATOMIC_LOAD_AND_I32:
3251     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
3252
3253  case ARM::ATOMIC_LOAD_OR_I8:
3254     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
3255  case ARM::ATOMIC_LOAD_OR_I16:
3256     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
3257  case ARM::ATOMIC_LOAD_OR_I32:
3258     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
3259
3260  case ARM::ATOMIC_LOAD_XOR_I8:
3261     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
3262  case ARM::ATOMIC_LOAD_XOR_I16:
3263     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
3264  case ARM::ATOMIC_LOAD_XOR_I32:
3265     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
3266
3267  case ARM::ATOMIC_LOAD_NAND_I8:
3268     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
3269  case ARM::ATOMIC_LOAD_NAND_I16:
3270     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
3271  case ARM::ATOMIC_LOAD_NAND_I32:
3272     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
3273
3274  case ARM::ATOMIC_LOAD_SUB_I8:
3275     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
3276  case ARM::ATOMIC_LOAD_SUB_I16:
3277     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
3278  case ARM::ATOMIC_LOAD_SUB_I32:
3279     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
3280
3281  case ARM::ATOMIC_SWAP_I8:  return EmitAtomicBinary(MI, BB, 1, 0);
3282  case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
3283  case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
3284
3285  case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(MI, BB, 1);
3286  case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
3287  case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
3288
3289  case ARM::tMOVCCr_pseudo: {
3290    // To "insert" a SELECT_CC instruction, we actually have to insert the
3291    // diamond control-flow pattern.  The incoming instruction knows the
3292    // destination vreg to set, the condition code register to branch on, the
3293    // true/false values to select between, and a branch opcode to use.
3294    const BasicBlock *LLVM_BB = BB->getBasicBlock();
3295    MachineFunction::iterator It = BB;
3296    ++It;
3297
3298    //  thisMBB:
3299    //  ...
3300    //   TrueVal = ...
3301    //   cmpTY ccX, r1, r2
3302    //   bCC copy1MBB
3303    //   fallthrough --> copy0MBB
3304    MachineBasicBlock *thisMBB  = BB;
3305    MachineFunction *F = BB->getParent();
3306    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
3307    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
3308    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
3309      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
3310    F->insert(It, copy0MBB);
3311    F->insert(It, sinkMBB);
3312    // Update machine-CFG edges by first adding all successors of the current
3313    // block to the new block which will contain the Phi node for the select.
3314    // Also inform sdisel of the edge changes.
3315    for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
3316           E = BB->succ_end(); I != E; ++I) {
3317      EM->insert(std::make_pair(*I, sinkMBB));
3318      sinkMBB->addSuccessor(*I);
3319    }
3320    // Next, remove all successors of the current block, and add the true
3321    // and fallthrough blocks as its successors.
3322    while (!BB->succ_empty())
3323      BB->removeSuccessor(BB->succ_begin());
3324    BB->addSuccessor(copy0MBB);
3325    BB->addSuccessor(sinkMBB);
3326
3327    //  copy0MBB:
3328    //   %FalseValue = ...
3329    //   # fallthrough to sinkMBB
3330    BB = copy0MBB;
3331
3332    // Update machine-CFG edges
3333    BB->addSuccessor(sinkMBB);
3334
3335    //  sinkMBB:
3336    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
3337    //  ...
3338    BB = sinkMBB;
3339    BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
3340      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
3341      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
3342
3343    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
3344    return BB;
3345  }
3346
3347  case ARM::tANDsp:
3348  case ARM::tADDspr_:
3349  case ARM::tSUBspi_:
3350  case ARM::t2SUBrSPi_:
3351  case ARM::t2SUBrSPi12_:
3352  case ARM::t2SUBrSPs_: {
3353    MachineFunction *MF = BB->getParent();
3354    unsigned DstReg = MI->getOperand(0).getReg();
3355    unsigned SrcReg = MI->getOperand(1).getReg();
3356    bool DstIsDead = MI->getOperand(0).isDead();
3357    bool SrcIsKill = MI->getOperand(1).isKill();
3358
3359    if (SrcReg != ARM::SP) {
3360      // Copy the source to SP from virtual register.
3361      const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
3362      unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
3363        ? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
3364      BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
3365        .addReg(SrcReg, getKillRegState(SrcIsKill));
3366    }
3367
3368    unsigned OpOpc = 0;
3369    bool NeedPred = false, NeedCC = false, NeedOp3 = false;
3370    switch (MI->getOpcode()) {
3371    default:
3372      llvm_unreachable("Unexpected pseudo instruction!");
3373    case ARM::tANDsp:
3374      OpOpc = ARM::tAND;
3375      NeedPred = true;
3376      break;
3377    case ARM::tADDspr_:
3378      OpOpc = ARM::tADDspr;
3379      break;
3380    case ARM::tSUBspi_:
3381      OpOpc = ARM::tSUBspi;
3382      break;
3383    case ARM::t2SUBrSPi_:
3384      OpOpc = ARM::t2SUBrSPi;
3385      NeedPred = true; NeedCC = true;
3386      break;
3387    case ARM::t2SUBrSPi12_:
3388      OpOpc = ARM::t2SUBrSPi12;
3389      NeedPred = true;
3390      break;
3391    case ARM::t2SUBrSPs_:
3392      OpOpc = ARM::t2SUBrSPs;
3393      NeedPred = true; NeedCC = true; NeedOp3 = true;
3394      break;
3395    }
3396    MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
3397    if (OpOpc == ARM::tAND)
3398      AddDefaultT1CC(MIB);
3399    MIB.addReg(ARM::SP);
3400    MIB.addOperand(MI->getOperand(2));
3401    if (NeedOp3)
3402      MIB.addOperand(MI->getOperand(3));
3403    if (NeedPred)
3404      AddDefaultPred(MIB);
3405    if (NeedCC)
3406      AddDefaultCC(MIB);
3407
3408    // Copy the result from SP to virtual register.
3409    const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
3410    unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
3411      ? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
3412    BuildMI(BB, dl, TII->get(CopyOpc))
3413      .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
3414      .addReg(ARM::SP);
3415    MF->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
3416    return BB;
3417  }
3418  }
3419}
3420
3421//===----------------------------------------------------------------------===//
3422//                           ARM Optimization Hooks
3423//===----------------------------------------------------------------------===//
3424
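/// combineSelectAndUse - When one operand of an add (or the second operand of
/// a sub) is a select of a constant zero, fold the operation into the select,
/// e.g., (add (select cc, 0, c), x) -> (select cc, x, (add x, c)).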
3425static
3426SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
3427                            TargetLowering::DAGCombinerInfo &DCI) {
3428  SelectionDAG &DAG = DCI.DAG;
3429  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3430  EVT VT = N->getValueType(0);
3431  unsigned Opc = N->getOpcode();
3432  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
3433  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
3434  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
3435  ISD::CondCode CC = ISD::SETCC_INVALID;
3436
3437  if (isSlctCC) {
3438    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
3439  } else {
3440    SDValue CCOp = Slct.getOperand(0);
3441    if (CCOp.getOpcode() == ISD::SETCC)
3442      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
3443  }
3444
3445  bool DoXform = false;
3446  bool InvCC = false;
3447  assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
3448          "Bad input!");
3449
3450  if (LHS.getOpcode() == ISD::Constant &&
3451      cast<ConstantSDNode>(LHS)->isNullValue()) {
3452    DoXform = true;
3453  } else if (CC != ISD::SETCC_INVALID &&
3454             RHS.getOpcode() == ISD::Constant &&
3455             cast<ConstantSDNode>(RHS)->isNullValue()) {
3456    std::swap(LHS, RHS);
3457    SDValue Op0 = Slct.getOperand(0);
3458    EVT OpVT = isSlctCC ? Op0.getValueType() :
3459                          Op0.getOperand(0).getValueType();
3460    bool isInt = OpVT.isInteger();
3461    CC = ISD::getSetCCInverse(CC, isInt);
3462
3463    if (!TLI.isCondCodeLegal(CC, OpVT))
3464      return SDValue();         // Inverse operator isn't legal.
3465
3466    DoXform = true;
3467    InvCC = true;
3468  }
3469
3470  if (DoXform) {
3471    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
3472    if (isSlctCC)
3473      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
3474                             Slct.getOperand(0), Slct.getOperand(1), CC);
3475    SDValue CCOp = Slct.getOperand(0);
3476    if (InvCC)
3477      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
3478                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
3479    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
3480                       CCOp, OtherOp, Result);
3481  }
3482  return SDValue();
3483}
3484
3485/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
3486static SDValue PerformADDCombine(SDNode *N,
3487                                 TargetLowering::DAGCombinerInfo &DCI) {
3488  // added by evan in r37685 with no testcase.
3489  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3490
3491  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
3492  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
3493    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
3494    if (Result.getNode()) return Result;
3495  }
3496  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
3497    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
3498    if (Result.getNode()) return Result;
3499  }
3500
3501  return SDValue();
3502}
3503
3504/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
3505static SDValue PerformSUBCombine(SDNode *N,
3506                                 TargetLowering::DAGCombinerInfo &DCI) {
3507  // added by evan in r37685 with no testcase.
3508  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3509
3510  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
3511  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
3512    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
3513    if (Result.getNode()) return Result;
3514  }
3515
3516  return SDValue();
3517}
3518
3519/// PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
3520static SDValue PerformVMOVRRDCombine(SDNode *N,
3521                                   TargetLowering::DAGCombinerInfo &DCI) {
3522  // fmrrd(fmdrr x, y) -> x,y
3523  SDValue InDouble = N->getOperand(0);
3524  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
3525    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
3526  return SDValue();
3527}
3528
3529/// getVShiftImm - Check if this is a valid build_vector for the immediate
3530/// operand of a vector shift operation, where all the elements of the
3531/// build_vector must have the same constant integer value.
3532static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3533  // Ignore bit_converts.
3534  while (Op.getOpcode() == ISD::BIT_CONVERT)
3535    Op = Op.getOperand(0);
3536  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3537  APInt SplatBits, SplatUndef;
3538  unsigned SplatBitSize;
3539  bool HasAnyUndefs;
3540  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3541                                      HasAnyUndefs, ElementBits) ||
3542      SplatBitSize > ElementBits)
3543    return false;
3544  Cnt = SplatBits.getSExtValue();
3545  return true;
3546}
3547
3548/// isVShiftLImm - Check if this is a valid build_vector for the immediate
3549/// operand of a vector shift left operation.  That value must be in the range:
3550///   0 <= Value < ElementBits for a left shift; or
3551///   0 <= Value <= ElementBits for a long left shift.
3552static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
3553  assert(VT.isVector() && "vector shift count is not a vector type");
3554  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3555  if (! getVShiftImm(Op, ElementBits, Cnt))
3556    return false;
3557  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
3558}
3559
3560/// isVShiftRImm - Check if this is a valid build_vector for the immediate
3561/// operand of a vector shift right operation.  For a shift opcode, the value
3562/// is positive, but for an intrinsic the value must be negative. The
3563/// absolute value must be in the range:
3564///   1 <= |Value| <= ElementBits for a right shift; or
3565///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
3566static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
3567                         int64_t &Cnt) {
3568  assert(VT.isVector() && "vector shift count is not a vector type");
3569  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3570  if (! getVShiftImm(Op, ElementBits, Cnt))
3571    return false;
3572  if (isIntrinsic)
3573    Cnt = -Cnt;
3574  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
3575}
3576
3577/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
3578static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
3579  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3580  switch (IntNo) {
3581  default:
3582    // Don't do anything for most intrinsics.
3583    break;
3584
3585  // Vector shifts: check for immediate versions and lower them.
3586  // Note: This is done during DAG combining instead of DAG legalizing because
3587  // the build_vectors for 64-bit vector element shift counts are generally
3588  // not legal, and it is hard to see their values after they get legalized to
3589  // loads from a constant pool.
3590  case Intrinsic::arm_neon_vshifts:
3591  case Intrinsic::arm_neon_vshiftu:
3592  case Intrinsic::arm_neon_vshiftls:
3593  case Intrinsic::arm_neon_vshiftlu:
3594  case Intrinsic::arm_neon_vshiftn:
3595  case Intrinsic::arm_neon_vrshifts:
3596  case Intrinsic::arm_neon_vrshiftu:
3597  case Intrinsic::arm_neon_vrshiftn:
3598  case Intrinsic::arm_neon_vqshifts:
3599  case Intrinsic::arm_neon_vqshiftu:
3600  case Intrinsic::arm_neon_vqshiftsu:
3601  case Intrinsic::arm_neon_vqshiftns:
3602  case Intrinsic::arm_neon_vqshiftnu:
3603  case Intrinsic::arm_neon_vqshiftnsu:
3604  case Intrinsic::arm_neon_vqrshiftns:
3605  case Intrinsic::arm_neon_vqrshiftnu:
3606  case Intrinsic::arm_neon_vqrshiftnsu: {
3607    EVT VT = N->getOperand(1).getValueType();
3608    int64_t Cnt;
3609    unsigned VShiftOpc = 0;
3610
3611    switch (IntNo) {
3612    case Intrinsic::arm_neon_vshifts:
3613    case Intrinsic::arm_neon_vshiftu:
3614      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
3615        VShiftOpc = ARMISD::VSHL;
3616        break;
3617      }
3618      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
3619        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
3620                     ARMISD::VSHRs : ARMISD::VSHRu);
3621        break;
3622      }
3623      return SDValue();
3624
3625    case Intrinsic::arm_neon_vshiftls:
3626    case Intrinsic::arm_neon_vshiftlu:
3627      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
3628        break;
3629      llvm_unreachable("invalid shift count for vshll intrinsic");
3630
3631    case Intrinsic::arm_neon_vrshifts:
3632    case Intrinsic::arm_neon_vrshiftu:
3633      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
3634        break;
3635      return SDValue();
3636
3637    case Intrinsic::arm_neon_vqshifts:
3638    case Intrinsic::arm_neon_vqshiftu:
3639      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
3640        break;
3641      return SDValue();
3642
3643    case Intrinsic::arm_neon_vqshiftsu:
3644      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
3645        break;
3646      llvm_unreachable("invalid shift count for vqshlu intrinsic");
3647
3648    case Intrinsic::arm_neon_vshiftn:
3649    case Intrinsic::arm_neon_vrshiftn:
3650    case Intrinsic::arm_neon_vqshiftns:
3651    case Intrinsic::arm_neon_vqshiftnu:
3652    case Intrinsic::arm_neon_vqshiftnsu:
3653    case Intrinsic::arm_neon_vqrshiftns:
3654    case Intrinsic::arm_neon_vqrshiftnu:
3655    case Intrinsic::arm_neon_vqrshiftnsu:
3656      // Narrowing shifts require an immediate right shift.
3657      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
3658        break;
3659      llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");
3660
3661    default:
3662      llvm_unreachable("unhandled vector shift");
3663    }
3664
3665    switch (IntNo) {
3666    case Intrinsic::arm_neon_vshifts:
3667    case Intrinsic::arm_neon_vshiftu:
3668      // Opcode already set above.
3669      break;
3670    case Intrinsic::arm_neon_vshiftls:
3671    case Intrinsic::arm_neon_vshiftlu:
3672      if (Cnt == VT.getVectorElementType().getSizeInBits())
3673        VShiftOpc = ARMISD::VSHLLi;
3674      else
3675        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
3676                     ARMISD::VSHLLs : ARMISD::VSHLLu);
3677      break;
3678    case Intrinsic::arm_neon_vshiftn:
3679      VShiftOpc = ARMISD::VSHRN; break;
3680    case Intrinsic::arm_neon_vrshifts:
3681      VShiftOpc = ARMISD::VRSHRs; break;
3682    case Intrinsic::arm_neon_vrshiftu:
3683      VShiftOpc = ARMISD::VRSHRu; break;
3684    case Intrinsic::arm_neon_vrshiftn:
3685      VShiftOpc = ARMISD::VRSHRN; break;
3686    case Intrinsic::arm_neon_vqshifts:
3687      VShiftOpc = ARMISD::VQSHLs; break;
3688    case Intrinsic::arm_neon_vqshiftu:
3689      VShiftOpc = ARMISD::VQSHLu; break;
3690    case Intrinsic::arm_neon_vqshiftsu:
3691      VShiftOpc = ARMISD::VQSHLsu; break;
3692    case Intrinsic::arm_neon_vqshiftns:
3693      VShiftOpc = ARMISD::VQSHRNs; break;
3694    case Intrinsic::arm_neon_vqshiftnu:
3695      VShiftOpc = ARMISD::VQSHRNu; break;
3696    case Intrinsic::arm_neon_vqshiftnsu:
3697      VShiftOpc = ARMISD::VQSHRNsu; break;
3698    case Intrinsic::arm_neon_vqrshiftns:
3699      VShiftOpc = ARMISD::VQRSHRNs; break;
3700    case Intrinsic::arm_neon_vqrshiftnu:
3701      VShiftOpc = ARMISD::VQRSHRNu; break;
3702    case Intrinsic::arm_neon_vqrshiftnsu:
3703      VShiftOpc = ARMISD::VQRSHRNsu; break;
3704    }
3705
3706    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
3707                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
3708  }
3709
3710  case Intrinsic::arm_neon_vshiftins: {
3711    EVT VT = N->getOperand(1).getValueType();
3712    int64_t Cnt;
3713    unsigned VShiftOpc = 0;
3714
3715    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
3716      VShiftOpc = ARMISD::VSLI;
3717    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
3718      VShiftOpc = ARMISD::VSRI;
3719    else {
3720      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
3721    }
3722
3723    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
3724                       N->getOperand(1), N->getOperand(2),
3725                       DAG.getConstant(Cnt, MVT::i32));
3726  }
3727
3728  case Intrinsic::arm_neon_vqrshifts:
3729  case Intrinsic::arm_neon_vqrshiftu:
3730    // No immediate versions of these to check for.
3731    break;
3732  }
3733
3734  return SDValue();
3735}
3736
3737/// PerformShiftCombine - Checks for immediate versions of vector shifts and
3738/// lowers them.  As with the vector shift intrinsics, this is done during DAG
3739/// combining instead of DAG legalizing because the build_vectors for 64-bit
3740/// vector element shift counts are generally not legal, and it is hard to see
3741/// their values after they get legalized to loads from a constant pool.
3742static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
3743                                   const ARMSubtarget *ST) {
3744  EVT VT = N->getValueType(0);
3745
3746  // Nothing to be done for scalar shifts.
3747  if (! VT.isVector())
3748    return SDValue();
3749
3750  assert(ST->hasNEON() && "unexpected vector shift");
3751  int64_t Cnt;
3752
3753  switch (N->getOpcode()) {
3754  default: llvm_unreachable("unexpected shift opcode");
3755
3756  case ISD::SHL:
3757    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
3758      return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
3759                         DAG.getConstant(Cnt, MVT::i32));
3760    break;
3761
3762  case ISD::SRA:
3763  case ISD::SRL:
3764    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
3765      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
3766                            ARMISD::VSHRs : ARMISD::VSHRu);
3767      return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
3768                         DAG.getConstant(Cnt, MVT::i32));
3769    }
3770  }
3771  return SDValue();
3772}
3773
3774/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
3775/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
3776static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
3777                                    const ARMSubtarget *ST) {
3778  SDValue N0 = N->getOperand(0);
3779
3780  // Check for sign- and zero-extensions of vector extract operations of 8-
3781  // and 16-bit vector elements.  NEON supports these directly.  They are
3782  // handled during DAG combining because type legalization will promote them
3783  // to 32-bit types and it is messy to recognize the operations after that.
3784  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
3785    SDValue Vec = N0.getOperand(0);
3786    SDValue Lane = N0.getOperand(1);
3787    EVT VT = N->getValueType(0);
3788    EVT EltVT = N0.getValueType();
3789    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3790
3791    if (VT == MVT::i32 &&
3792        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
3793        TLI.isTypeLegal(Vec.getValueType())) {
3794
3795      unsigned Opc = 0;
3796      switch (N->getOpcode()) {
3797      default: llvm_unreachable("unexpected opcode");
3798      case ISD::SIGN_EXTEND:
3799        Opc = ARMISD::VGETLANEs;
3800        break;
3801      case ISD::ZERO_EXTEND:
3802      case ISD::ANY_EXTEND:
3803        Opc = ARMISD::VGETLANEu;
3804        break;
3805      }
3806      return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
3807    }
3808  }
3809
3810  return SDValue();
3811}
3812
3813SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
3814                                             DAGCombinerInfo &DCI) const {
3815  switch (N->getOpcode()) {
3816  default: break;
3817  case ISD::ADD:      return PerformADDCombine(N, DCI);
3818  case ISD::SUB:      return PerformSUBCombine(N, DCI);
3819  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
3820  case ISD::INTRINSIC_WO_CHAIN:
3821    return PerformIntrinsicCombine(N, DCI.DAG);
3822  case ISD::SHL:
3823  case ISD::SRA:
3824  case ISD::SRL:
3825    return PerformShiftCombine(N, DCI.DAG, Subtarget);
3826  case ISD::SIGN_EXTEND:
3827  case ISD::ZERO_EXTEND:
3828  case ISD::ANY_EXTEND:
3829    return PerformExtendCombine(N, DCI.DAG, Subtarget);
3830  }
3831  return SDValue();
3832}
3833
3834bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
3835  if (!Subtarget->hasV6Ops())
3836    // Pre-v6 does not support unaligned mem access.
3837    return false;
3838  else if (!Subtarget->hasV7Ops()) {
3839    // v6 may or may not support unaligned mem access.
3840    if (!Subtarget->isTargetDarwin())
3841      return false;
3842  }
3843
3844  switch (VT.getSimpleVT().SimpleTy) {
3845  default:
3846    return false;
3847  case MVT::i8:
3848  case MVT::i16:
3849  case MVT::i32:
3850    return true;
3851  // FIXME: VLD1 etc with standard alignment is legal.
3852  }
3853}
3854
3855static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
3856  if (V < 0)
3857    return false;
3858
3859  unsigned Scale = 1;
3860  switch (VT.getSimpleVT().SimpleTy) {
3861  default: return false;
3862  case MVT::i1:
3863  case MVT::i8:
3864    // Scale == 1;
3865    break;
3866  case MVT::i16:
3867    // Scale == 2;
3868    Scale = 2;
3869    break;
3870  case MVT::i32:
3871    // Scale == 4;
3872    Scale = 4;
3873    break;
3874  }
3875
3876  if ((V & (Scale - 1)) != 0)
3877    return false;
3878  V /= Scale;
3879  return V == (V & ((1LL << 5) - 1));
3880}
3881
3882static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
3883                                      const ARMSubtarget *Subtarget) {
3884  bool isNeg = false;
3885  if (V < 0) {
3886    isNeg = true;
3887    V = - V;
3888  }
3889
3890  switch (VT.getSimpleVT().SimpleTy) {
3891  default: return false;
3892  case MVT::i1:
3893  case MVT::i8:
3894  case MVT::i16:
3895  case MVT::i32:
3896    // + imm12 or - imm8
3897    if (isNeg)
3898      return V == (V & ((1LL << 8) - 1));
3899    return V == (V & ((1LL << 12) - 1));
3900  case MVT::f32:
3901  case MVT::f64:
3902    // Same as ARM mode. FIXME: NEON?
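    // VFP load / store offsets are an 8-bit immediate scaled by 4, i.e., a
    // multiple of 4 in the range [-1020, 1020].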
3903    if (!Subtarget->hasVFP2())
3904      return false;
3905    if ((V & 3) != 0)
3906      return false;
3907    V >>= 2;
3908    return V == (V & ((1LL << 8) - 1));
3909  }
3910}
3911
3912/// isLegalAddressImmediate - Return true if the integer value can be used
3913/// as the offset of the target addressing mode for load / store of the
3914/// given type.
3915static bool isLegalAddressImmediate(int64_t V, EVT VT,
3916                                    const ARMSubtarget *Subtarget) {
3917  if (V == 0)
3918    return true;
3919
3920  if (!VT.isSimple())
3921    return false;
3922
3923  if (Subtarget->isThumb1Only())
3924    return isLegalT1AddressImmediate(V, VT);
3925  else if (Subtarget->isThumb2())
3926    return isLegalT2AddressImmediate(V, VT, Subtarget);
3927
3928  // ARM mode.
3929  if (V < 0)
3930    V = - V;
3931  switch (VT.getSimpleVT().SimpleTy) {
3932  default: return false;
3933  case MVT::i1:
3934  case MVT::i8:
3935  case MVT::i32:
3936    // +- imm12
3937    return V == (V & ((1LL << 12) - 1));
3938  case MVT::i16:
3939    // +- imm8
3940    return V == (V & ((1LL << 8) - 1));
3941  case MVT::f32:
3942  case MVT::f64:
3943    if (!Subtarget->hasVFP2()) // FIXME: NEON?
3944      return false;
3945    if ((V & 3) != 0)
3946      return false;
3947    V >>= 2;
3948    return V == (V & ((1LL << 8) - 1));
3949  }
3950}
3951
3952bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
3953                                                      EVT VT) const {
3954  int Scale = AM.Scale;
3955  if (Scale < 0)
3956    return false;
3957
3958  switch (VT.getSimpleVT().SimpleTy) {
3959  default: return false;
3960  case MVT::i1:
3961  case MVT::i8:
3962  case MVT::i16:
3963  case MVT::i32:
3964    if (Scale == 1)
3965      return true;
3966    // r + r << imm
3967    Scale = Scale & ~1;
3968    return Scale == 2 || Scale == 4 || Scale == 8;
3969  case MVT::i64:
3970    // r + r
3971    if (((unsigned)AM.HasBaseReg + Scale) <= 2)
3972      return true;
3973    return false;
3974  case MVT::isVoid:
3975    // Note, we allow "void" uses (basically, uses that aren't loads or
3976    // stores), because arm allows folding a scale into many arithmetic
3977    // operations.  This should be made more precise and revisited later.
3978
3979    // Allow r << imm, but the imm has to be a multiple of two.
3980    if (Scale & 1) return false;
3981    return isPowerOf2_32(Scale);
3982  }
3983}
3984
3985/// isLegalAddressingMode - Return true if the addressing mode represented
3986/// by AM is legal for this target, for a load/store of the specified type.
3987bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3988                                              const Type *Ty) const {
3989  EVT VT = getValueType(Ty, true);
3990  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
3991    return false;
3992
3993  // Can never fold addr of global into load/store.
3994  if (AM.BaseGV)
3995    return false;
3996
3997  switch (AM.Scale) {
3998  case 0:  // no scale reg, must be "r+i" or "r", or "i".
3999    break;
4000  case 1:
4001    if (Subtarget->isThumb1Only())
4002      return false;
4003    // FALL THROUGH.
4004  default:
4005    // ARM doesn't support any R+R*scale+imm addr modes.
4006    if (AM.BaseOffs)
4007      return false;
4008
4009    if (!VT.isSimple())
4010      return false;
4011
4012    if (Subtarget->isThumb2())
4013      return isLegalT2ScaledAddressingMode(AM, VT);
4014
4015    int Scale = AM.Scale;
4016    switch (VT.getSimpleVT().SimpleTy) {
4017    default: return false;
4018    case MVT::i1:
4019    case MVT::i8:
4020    case MVT::i32:
4021      if (Scale < 0) Scale = -Scale;
4022      if (Scale == 1)
4023        return true;
4024      // r + r << imm
4025      return isPowerOf2_32(Scale & ~1);
4026    case MVT::i16:
4027    case MVT::i64:
4028      // r + r
4029      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
4030        return true;
4031      return false;
4032
4033    case MVT::isVoid:
4034      // Note, we allow "void" uses (basically, uses that aren't loads or
4035    // stores), because ARM allows folding a scale into many arithmetic
4036      // operations.  This should be made more precise and revisited later.
4037
4038      // Allow r << imm, but the imm has to be a multiple of two.
4039      if (Scale & 1) return false;
4040      return isPowerOf2_32(Scale);
4041    }
4042    break;
4043  }
4044  return true;
4045}
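// In ARM mode, for example, this accepts "r1", "r1 + 4092" and
// "r1 + (r2 << 2)" as addressing modes for an i32 access, but rejects
// "r1 + (r2 << 2) + 4", since a scaled index and an immediate offset cannot
// be combined.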
4046
4047/// isLegalICmpImmediate - Return true if the specified immediate is a legal
4048/// icmp immediate, that is, the target has icmp instructions which can
4049/// compare a register against the immediate without having to materialize
4050/// the immediate into a register.
4051bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
4052  if (!Subtarget->isThumb())
4053    return ARM_AM::getSOImmVal(Imm) != -1;
4054  if (Subtarget->isThumb2())
4055    return ARM_AM::getT2SOImmVal(Imm) != -1;
4056  return Imm >= 0 && Imm <= 255;
4057}
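// Example for isLegalICmpImmediate: in ARM mode 0x00FF0000 is a legal compare
// immediate (0xFF rotated right by 16), while 0x101 is not and must first be
// materialized into a register.  Thumb-2 additionally accepts replicated byte
// patterns such as 0x00FF00FF; Thumb-1 compares are limited to 0-255.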
4058
4059static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
4060                                      bool isSEXTLoad, SDValue &Base,
4061                                      SDValue &Offset, bool &isInc,
4062                                      SelectionDAG &DAG) {
4063  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
4064    return false;
4065
4066  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
4067    // AddressingMode 3
4068    Base = Ptr->getOperand(0);
4069    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
4070      int RHSC = (int)RHS->getZExtValue();
4071      if (RHSC < 0 && RHSC > -256) {
4072        assert(Ptr->getOpcode() == ISD::ADD);
4073        isInc = false;
4074        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
4075        return true;
4076      }
4077    }
4078    isInc = (Ptr->getOpcode() == ISD::ADD);
4079    Offset = Ptr->getOperand(1);
4080    return true;
4081  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
4082    // AddressingMode 2
4083    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
4084      int RHSC = (int)RHS->getZExtValue();
4085      if (RHSC < 0 && RHSC > -0x1000) {
4086        assert(Ptr->getOpcode() == ISD::ADD);
4087        isInc = false;
4088        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
4089        Base = Ptr->getOperand(0);
4090        return true;
4091      }
4092    }
4093
4094    if (Ptr->getOpcode() == ISD::ADD) {
4095      isInc = true;
4096      ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
4097      if (ShOpcVal != ARM_AM::no_shift) {
4098        Base = Ptr->getOperand(1);
4099        Offset = Ptr->getOperand(0);
4100      } else {
4101        Base = Ptr->getOperand(0);
4102        Offset = Ptr->getOperand(1);
4103      }
4104      return true;
4105    }
4106
4107    isInc = (Ptr->getOpcode() == ISD::ADD);
4108    Base = Ptr->getOperand(0);
4109    Offset = Ptr->getOperand(1);
4110    return true;
4111  }
4112
4113  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
4114  return false;
4115}
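// The two branches above mirror the ARM load / store encodings: addressing
// mode 3 (halfword and sign-extending loads) only takes an 8-bit immediate or
// a plain register offset, while addressing mode 2 (word / byte) takes a
// 12-bit immediate or an optionally shifted register.  VLDR / VSTR have no
// writeback form, hence the FIXME about emulating indexed FP accesses with
// VLDM / VSTM.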
4116
4117static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
4118                                     bool isSEXTLoad, SDValue &Base,
4119                                     SDValue &Offset, bool &isInc,
4120                                     SelectionDAG &DAG) {
4121  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
4122    return false;
4123
4124  Base = Ptr->getOperand(0);
4125  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
4126    int RHSC = (int)RHS->getZExtValue();
4127    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
4128      assert(Ptr->getOpcode() == ISD::ADD);
4129      isInc = false;
4130      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
4131      return true;
4132    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
4133      isInc = Ptr->getOpcode() == ISD::ADD;
4134      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
4135      return true;
4136    }
4137  }
4138
4139  return false;
4140}
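// Thumb-2 pre / post-indexed loads and stores only encode an 8-bit offset,
// which is why both branches above reject magnitudes of 0x100 or more; a zero
// offset is also rejected, since it would make the indexed form pointless.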
4141
4142/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
4143/// offset, and addressing mode by reference, if the node's address can be
4144/// legally represented as a pre-indexed load / store address.
4145bool
4146ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
4147                                             SDValue &Offset,
4148                                             ISD::MemIndexedMode &AM,
4149                                             SelectionDAG &DAG) const {
4150  if (Subtarget->isThumb1Only())
4151    return false;
4152
4153  EVT VT;
4154  SDValue Ptr;
4155  bool isSEXTLoad = false;
4156  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
4157    Ptr = LD->getBasePtr();
4158    VT  = LD->getMemoryVT();
4159    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
4160  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
4161    Ptr = ST->getBasePtr();
4162    VT  = ST->getMemoryVT();
4163  } else
4164    return false;
4165
4166  bool isInc;
4167  bool isLegal = false;
4168  if (Subtarget->isThumb2())
4169    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
4170                                       Offset, isInc, DAG);
4171  else
4172    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
4173                                        Offset, isInc, DAG);
4174  if (!isLegal)
4175    return false;
4176
4177  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
4178  return true;
4179}
4180
4181/// getPostIndexedAddressParts - Returns true, and sets the base pointer,
4182/// offset, and addressing mode by reference, if this node can be combined
4183/// with a load / store to form a post-indexed load / store.
4184bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
4185                                                   SDValue &Base,
4186                                                   SDValue &Offset,
4187                                                   ISD::MemIndexedMode &AM,
4188                                                   SelectionDAG &DAG) const {
4189  if (Subtarget->isThumb1Only())
4190    return false;
4191
4192  EVT VT;
4194  bool isSEXTLoad = false;
4195  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
4196    VT  = LD->getMemoryVT();
4197    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
4198  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
4199    VT  = ST->getMemoryVT();
4200  } else
4201    return false;
4202
4203  bool isInc;
4204  bool isLegal = false;
4205  if (Subtarget->isThumb2())
4206    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
4207                                        isInc, DAG);
4208  else
4209    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
4210                                        isInc, DAG);
4211  if (!isLegal)
4212    return false;
4213
4214  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
4215  return true;
4216}
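// For reference: a pre-indexed access uses base+offset as the address and
// writes it back to the base register ("ldr r0, [r1, #4]!"); a post-indexed
// access uses the unmodified base and updates it afterwards
// ("ldr r0, [r1], #4").  Either way the separate add / sub of the base
// pointer is folded away.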
4217
4218void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
4219                                                       const APInt &Mask,
4220                                                       APInt &KnownZero,
4221                                                       APInt &KnownOne,
4222                                                       const SelectionDAG &DAG,
4223                                                       unsigned Depth) const {
4224  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
4225  switch (Op.getOpcode()) {
4226  default: break;
4227  case ARMISD::CMOV: {
4228    // Bits are known zero/one if known on the LHS and RHS.
4229    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
4230    if (KnownZero == 0 && KnownOne == 0) return;
4231
4232    APInt KnownZeroRHS, KnownOneRHS;
4233    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
4234                          KnownZeroRHS, KnownOneRHS, Depth+1);
4235    KnownZero &= KnownZeroRHS;
4236    KnownOne  &= KnownOneRHS;
4237    return;
4238  }
4239  }
4240}
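// For example, if both CMOV operands are known to have their low two bits
// clear (say, two 4-byte-aligned pointers), the intersection computed above
// marks the low two bits of the result as known zero as well.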
4241
4242//===----------------------------------------------------------------------===//
4243//                           ARM Inline Assembly Support
4244//===----------------------------------------------------------------------===//
4245
4246/// getConstraintType - Given a constraint letter, return the type of
4247/// constraint it is for this target.
4248ARMTargetLowering::ConstraintType
4249ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
4250  if (Constraint.size() == 1) {
4251    switch (Constraint[0]) {
4252    default:  break;
4253    case 'l': return C_RegisterClass;
4254    case 'w': return C_RegisterClass;
4255    }
4256  }
4257  return TargetLowering::getConstraintType(Constraint);
4258}
4259
4260std::pair<unsigned, const TargetRegisterClass*>
4261ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
4262                                                EVT VT) const {
4263  if (Constraint.size() == 1) {
4264    // GCC ARM Constraint Letters
4265    switch (Constraint[0]) {
4266    case 'l':
4267      if (Subtarget->isThumb())
4268        return std::make_pair(0U, ARM::tGPRRegisterClass);
4269      else
4270        return std::make_pair(0U, ARM::GPRRegisterClass);
4271    case 'r':
4272      return std::make_pair(0U, ARM::GPRRegisterClass);
4273    case 'w':
4274      if (VT == MVT::f32)
4275        return std::make_pair(0U, ARM::SPRRegisterClass);
4276      if (VT.getSizeInBits() == 64)
4277        return std::make_pair(0U, ARM::DPRRegisterClass);
4278      if (VT.getSizeInBits() == 128)
4279        return std::make_pair(0U, ARM::QPRRegisterClass);
4280      break;
4281    }
4282  }
4283  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
4284}
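// A minimal usage sketch (hypothetical user code, not part of this file):
//   asm ("add %0, %1, %2" : "=l" (d) : "l" (a), "l" (b));
// In Thumb mode the 'l' constraint pins all three operands to r0-r7, while
// 'w' would pick an S, D or Q VFP register depending on the operand type.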
4285
4286std::vector<unsigned> ARMTargetLowering::
4287getRegClassForInlineAsmConstraint(const std::string &Constraint,
4288                                  EVT VT) const {
4289  if (Constraint.size() != 1)
4290    return std::vector<unsigned>();
4291
4292  switch (Constraint[0]) {      // GCC ARM Constraint Letters
4293  default: break;
4294  case 'l':
4295    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
4296                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
4297                                 0);
4298  case 'r':
4299    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
4300                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
4301                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
4302                                 ARM::R12, ARM::LR, 0);
4303  case 'w':
4304    if (VT == MVT::f32)
4305      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
4306                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
4307                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
4308                                   ARM::S12,ARM::S13,ARM::S14,ARM::S15,
4309                                   ARM::S16,ARM::S17,ARM::S18,ARM::S19,
4310                                   ARM::S20,ARM::S21,ARM::S22,ARM::S23,
4311                                   ARM::S24,ARM::S25,ARM::S26,ARM::S27,
4312                                   ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
4313    if (VT.getSizeInBits() == 64)
4314      return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
4315                                   ARM::D4, ARM::D5, ARM::D6, ARM::D7,
4316                                   ARM::D8, ARM::D9, ARM::D10,ARM::D11,
4317                                   ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
4318    if (VT.getSizeInBits() == 128)
4319      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
4320                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
4321    break;
4322  }
4323
4324  return std::vector<unsigned>();
4325}
4326
4327/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4328/// vector.  If it is invalid, don't add anything to Ops.
4329void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4330                                                     char Constraint,
4331                                                     bool hasMemory,
4332                                                     std::vector<SDValue>&Ops,
4333                                                     SelectionDAG &DAG) const {
4334  SDValue Result(0, 0);
4335
4336  switch (Constraint) {
4337  default: break;
4338  case 'I': case 'J': case 'K': case 'L':
4339  case 'M': case 'N': case 'O':
4340    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4341    if (!C)
4342      return;
4343
4344    int64_t CVal64 = C->getSExtValue();
4345    int CVal = (int) CVal64;
4346    // None of these constraints allow values larger than 32 bits.  Check
4347    // that the value fits in an int.
4348    if (CVal != CVal64)
4349      return;
4350
4351    switch (Constraint) {
4352      case 'I':
4353        if (Subtarget->isThumb1Only()) {
4354          // This must be a constant between 0 and 255, for ADD
4355          // immediates.
4356          if (CVal >= 0 && CVal <= 255)
4357            break;
4358        } else if (Subtarget->isThumb2()) {
4359          // A constant that can be used as an immediate value in a
4360          // data-processing instruction.
4361          if (ARM_AM::getT2SOImmVal(CVal) != -1)
4362            break;
4363        } else {
4364          // A constant that can be used as an immediate value in a
4365          // data-processing instruction.
4366          if (ARM_AM::getSOImmVal(CVal) != -1)
4367            break;
4368        }
4369        return;
4370
4371      case 'J':
4372        if (Subtarget->isThumb()) {  // FIXME thumb2
4373          // This must be a constant between -255 and -1, for negated ADD
4374          // immediates. This can be used in GCC with an "n" modifier that
4375          // prints the negated value, for use with SUB instructions. It is
4376          // not useful otherwise but is implemented for compatibility.
4377          if (CVal >= -255 && CVal <= -1)
4378            break;
4379        } else {
4380          // This must be a constant between -4095 and 4095. It is not clear
4381          // what this constraint is intended for. Implemented for
4382          // compatibility with GCC.
4383          if (CVal >= -4095 && CVal <= 4095)
4384            break;
4385        }
4386        return;
4387
4388      case 'K':
4389        if (Subtarget->isThumb1Only()) {
4390          // A 32-bit value where only one byte has a nonzero value. Exclude
4391          // zero to match GCC. This constraint is used by GCC internally for
4392          // constants that can be loaded with a move/shift combination.
4393          // It is not useful otherwise but is implemented for compatibility.
4394          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
4395            break;
4396        } else if (Subtarget->isThumb2()) {
4397          // A constant whose bitwise inverse can be used as an immediate
4398          // value in a data-processing instruction. This can be used in GCC
4399          // with a "B" modifier that prints the inverted value, for use with
4400          // BIC and MVN instructions. It is not useful otherwise but is
4401          // implemented for compatibility.
4402          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
4403            break;
4404        } else {
4405          // A constant whose bitwise inverse can be used as an immediate
4406          // value in a data-processing instruction. This can be used in GCC
4407          // with a "B" modifier that prints the inverted value, for use with
4408          // BIC and MVN instructions. It is not useful otherwise but is
4409          // implemented for compatibility.
4410          if (ARM_AM::getSOImmVal(~CVal) != -1)
4411            break;
4412        }
4413        return;
4414
4415      case 'L':
4416        if (Subtarget->isThumb1Only()) {
4417          // This must be a constant between -7 and 7,
4418          // for 3-operand ADD/SUB immediate instructions.
4419          if (CVal >= -7 && CVal <= 7)
4420            break;
4421        } else if (Subtarget->isThumb2()) {
4422          // A constant whose negation can be used as an immediate value in a
4423          // data-processing instruction. This can be used in GCC with an "n"
4424          // modifier that prints the negated value, for use with SUB
4425          // instructions. It is not useful otherwise but is implemented for
4426          // compatibility.
4427          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
4428            break;
4429        } else {
4430          // A constant whose negation can be used as an immediate value in a
4431          // data-processing instruction. This can be used in GCC with an "n"
4432          // modifier that prints the negated value, for use with SUB
4433          // instructions. It is not useful otherwise but is implemented for
4434          // compatibility.
4435          if (ARM_AM::getSOImmVal(-CVal) != -1)
4436            break;
4437        }
4438        return;
4439
4440      case 'M':
4441        if (Subtarget->isThumb()) { // FIXME thumb2
4442          // This must be a multiple of 4 between 0 and 1020, for
4443          // ADD sp + immediate.
4444          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
4445            break;
4446        } else {
4447          // A power of two or a constant between 0 and 32.  This is used in
4448          // GCC for the shift amount on shifted register operands, but it is
4449          // useful in general for any shift amounts.
4450          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
4451            break;
4452        }
4453        return;
4454
4455      case 'N':
4456        if (Subtarget->isThumb()) {  // FIXME thumb2
4457          // This must be a constant between 0 and 31, for shift amounts.
4458          if (CVal >= 0 && CVal <= 31)
4459            break;
4460        }
4461        return;
4462
4463      case 'O':
4464        if (Subtarget->isThumb()) {  // FIXME thumb2
4465          // This must be a multiple of 4 between -508 and 508, for
4466          // ADD/SUB sp = sp + immediate.
4467          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
4468            break;
4469        }
4470        return;
4471    }
4472    Result = DAG.getTargetConstant(CVal, Op.getValueType());
4473    break;
4474  }
4475
4476  if (Result.getNode()) {
4477    Ops.push_back(Result);
4478    return;
4479  }
4480  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint,
4481                                                      hasMemory, Ops, DAG);
4482}
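// Example for the 'I' constraint: "I" (0x100) is accepted in ARM mode (1
// rotated right by 24 is a valid so_imm) but rejected for Thumb-1, where ADD
// immediates stop at 255; in the rejected case nothing is added to Ops.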
4483
4484bool
4485ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4486  // The ARM target isn't yet aware of offsets.
4487  return false;
4488}
4489
4490int ARM::getVFPf32Imm(const APFloat &FPImm) {
4491  APInt Imm = FPImm.bitcastToAPInt();
4492  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
4493  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
4494  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits
4495
4496  // We can handle 4 bits of mantissa.
4497  // mantissa = (16+UInt(e:f:g:h))/16.
4498  if (Mantissa & 0x7ffff)
4499    return -1;
4500  Mantissa >>= 19;
4501  if ((Mantissa & 0xf) != Mantissa)
4502    return -1;
4503
4504  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
4505  if (Exp < -3 || Exp > 4)
4506    return -1;
4507  Exp = ((Exp+3) & 0x7) ^ 4;
4508
4509  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
4510}
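// Worked example: +1.0f is 0x3F800000, so Sign = 0, Exp = 0 and the 4-bit
// mantissa is 0; the exponent field becomes ((0 + 3) & 7) ^ 4 = 7, and the
// function returns 0x70, the VMOV.F32 immediate encoding of 1.0.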
4511
4512int ARM::getVFPf64Imm(const APFloat &FPImm) {
4513  APInt Imm = FPImm.bitcastToAPInt();
4514  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
4515  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;   // -1022 to 1023
4516  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;
4517
4518  // We can handle 4 bits of mantissa.
4519  // mantissa = (16+UInt(e:f:g:h))/16.
4520  if (Mantissa & 0xffffffffffffLL)
4521    return -1;
4522  Mantissa >>= 48;
4523  if ((Mantissa & 0xf) != Mantissa)
4524    return -1;
4525
4526  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
4527  if (Exp < -3 || Exp > 4)
4528    return -1;
4529  Exp = ((Exp+3) & 0x7) ^ 4;
4530
4531  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
4532}
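// Together these encodings cover +/- (16..31)/16 * 2^(-3..4), i.e. magnitudes
// from 0.125 up to 31.0 in coarse steps; anything outside that set is
// reported as not encodable by returning -1.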
4533
4534/// isFPImmLegal - Returns true if the target can instruction select the
4535/// specified FP immediate natively. If false, the legalizer will
4536/// materialize the FP immediate as a load from a constant pool.
4537bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
4538  if (!Subtarget->hasVFP3())
4539    return false;
4540  if (VT == MVT::f32)
4541    return ARM::getVFPf32Imm(Imm) != -1;
4542  if (VT == MVT::f64)
4543    return ARM::getVFPf64Imm(Imm) != -1;
4544  return false;
4545}
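// So with VFP3, a constant such as 0.5 is materialized directly as a VMOV
// immediate, while 0.3 (not exactly representable with a 4-bit mantissa)
// still goes through the constant pool.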
4546