ARMISelLowering.cpp revision 62c1d00dfd38996f381edae55e1028b8e52a1107
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace llvm {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
        : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
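
// Editor's note: the two helpers above fold many NEON types onto a few
// canonical ones via addTypeForNEON's extra parameters. For a 64-bit type
// such as v8i8, loads/stores are promoted to f64 and bitwise operations to
// v2i32, so e.g. an AND of two v8i8 values is rewritten as a v2i32 AND over
// the same D register.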

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available on 32-bit targets.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32,  "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
    setLibcallName(RTLIB::O_F32,   "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64,   "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY,  "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET,  "__aeabi_memset");
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }
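
// Editor's illustration (not from the original source): with the names above,
// a division and remainder of the same operands can be folded into one DIVREM
// node and lowered to a single runtime call, roughly as below, assuming
// compiler-rt's __divmodsi4(numerator, denominator, &remainder) interface:
//
//   int q = a / b, r = a % b;   // -> one __divmodsi4 call yielding both q and r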

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL,     MVT::i64, Expand);
  setOperationAction(ISD::MULHU,   MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL,       MVT::i64, Custom);
  setOperationAction(ISD::SRA,       MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC,    MVT::i32, Custom);
    setOperationAction(ISD::ADDE,    MVT::i32, Custom);
    setOperationAction(ISD::SUBC,    MVT::i32, Custom);
    setOperationAction(ISD::SUBE,    MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV,  MVT::i32, Expand);
    setOperationAction(ISD::UDIV,  MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
  setOperationAction(ISD::EXCEPTIONADDR,      MVT::i32,   Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
  maxStoresPerMemset = 16;
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as the representative register class for all floating-point and
  // vector types. Since there are 32 SPR registers and 32 DPR registers, the
  // cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}
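
// Worked example (editor's sketch): a Q register aliases two D registers, so
// a live v4f32 value occupies two DPR units and reports Cost = 2 above, and a
// v8i64 value lives in a QQQQ tuple spanning eight D registers, hence
// Cost = 8. The register-pressure tracker charges the DPR class that many
// units per live value of the type.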

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN:    return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::ADDC:          return "ARMISD::ADDC";
  case ARMISD::ADDE:          return "ARMISD::ADDE";
  case ARMISD::SUBC:          return "ARMISD::SUBC";
  case ARMISD::SUBE:          return "ARMISD::SUBE";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::VTBL1:         return "ARMISD::VTBL1";
  case ARMISD::VTBL2:         return "ARMISD::VTBL2";
  case ARMISD::VMULLs:        return "ARMISD::VMULLs";
  case ARMISD::VMULLu:        return "ARMISD::VMULLu";
  case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX:          return "ARMISD::FMAX";
  case ARMISD::FMIN:          return "ARMISD::FMIN";
  case ARMISD::BFI:           return "ARMISD::BFI";
  case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
  case ARMISD::VBSL:          return "ARMISD::VBSL";
  case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
  }
}

EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}
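
// Example (editor's note): a SETCC over v4f32 produces v4i32 (same shape,
// integer elements, matching the all-zeros/all-ones vector-boolean contents
// set in the constructor), while a scalar compare simply yields the
// pointer-sized integer type, i32 on ARM.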

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}
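
// Editor's note (assumption): these bounds appear to track the unsigned
// immediate-offset range of the underlying load/store encodings -- a 12-bit
// immediate (0..4095) in ARM/Thumb2 mode, with 127 serving as a conservative
// cap for Thumb1's much smaller scaled offsets.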

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
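
// Worked example (editor's sketch): SETONE ("ordered and not equal") has no
// single ARM condition, so it yields CondCode = MI and CondCode2 = GT; a
// consumer such as the BR_CC lowering then tests the flags twice, roughly:
//
//   vcmp.f64  d0, d1
//   vmrs      APSR_nzcv, fpscr   @ FMSTAT
//   bmi       Ltaken             @ the "less than" half of ONE
//   bgt       Ltaken             @ the "greater than" half of ONE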

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}
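
// Example of the dispatch above (editor's note): a non-variadic C call on an
// AAPCS target compiled with VFP2 and FloatABIType == FloatABI::Hard selects
// CC_ARM_AAPCS_VFP, passing FP arguments in s/d registers, while the same
// call on an APCS target falls back to CC_ARM_APCS with everything in core
// registers and on the stack.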

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
1214
1215/// LowerCall - Lower a call into a callseq_start <-
1216/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
1217/// nodes.
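///
/// As a rough sketch (not exact SelectionDAG syntax), a simple direct call
/// becomes:
///   ch = callseq_start ch, #NumBytes
///   ch, glue = ARMISD::CALL ch, TargetGlobalAddress:callee, argregs, glue
///   ch, glue = callseq_end ch, #NumBytes, #0, glue
/// followed by copies of the result registers into virtual registers.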
1218SDValue
1219ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1220                             CallingConv::ID CallConv, bool isVarArg,
1221                             bool &isTailCall,
1222                             const SmallVectorImpl<ISD::OutputArg> &Outs,
1223                             const SmallVectorImpl<SDValue> &OutVals,
1224                             const SmallVectorImpl<ISD::InputArg> &Ins,
1225                             DebugLoc dl, SelectionDAG &DAG,
1226                             SmallVectorImpl<SDValue> &InVals) const {
1227  MachineFunction &MF = DAG.getMachineFunction();
1228  bool IsStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1229  bool IsSibCall = false;
1230  // Disable tail calls if they're not supported.
1231  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
1232    isTailCall = false;
1233  if (isTailCall) {
1234    // Check if it's really possible to do a tail call.
1235    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1236                    isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
1237                                                   Outs, OutVals, Ins, DAG);
1238    // We don't support GuaranteedTailCallOpt for ARM, only automatically
1239    // detected sibcalls.
1240    if (isTailCall) {
1241      ++NumTailCalls;
1242      IsSibCall = true;
1243    }
1244  }
1245
1246  // Analyze operands of the call, assigning locations to each operand.
1247  SmallVector<CCValAssign, 16> ArgLocs;
1248  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1249                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
1250  CCInfo.AnalyzeCallOperands(Outs,
1251                             CCAssignFnForNode(CallConv, /* Return*/ false,
1252                                               isVarArg));
1253
1254  // Get a count of how many bytes are to be pushed on the stack.
1255  unsigned NumBytes = CCInfo.getNextStackOffset();
1256
1257  // For tail calls, memory operands are available in our caller's stack.
1258  if (IsSibCall)
1259    NumBytes = 0;
1260
1261  // Adjust the stack pointer for the new arguments...
1262  // These operations are automatically eliminated by the prolog/epilog pass
1263  if (!IsSibCall)
1264    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
1265
1266  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
1267
1268  RegsToPassVector RegsToPass;
1269  SmallVector<SDValue, 8> MemOpChains;
1270
1271  // Walk the register/memloc assignments, inserting copies/loads.  In the case
1272  // of tail call optimization, arguments are handled later.
1273  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1274       i != e;
1275       ++i, ++realArgIdx) {
1276    CCValAssign &VA = ArgLocs[i];
1277    SDValue Arg = OutVals[realArgIdx];
1278    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1279    bool isByVal = Flags.isByVal();
1280
1281    // Promote the value if needed.
1282    switch (VA.getLocInfo()) {
1283    default: llvm_unreachable("Unknown loc info!");
1284    case CCValAssign::Full: break;
1285    case CCValAssign::SExt:
1286      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1287      break;
1288    case CCValAssign::ZExt:
1289      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1290      break;
1291    case CCValAssign::AExt:
1292      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1293      break;
1294    case CCValAssign::BCvt:
1295      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1296      break;
1297    }
1298
1299    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
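    // E.g. (roughly, assuming a GPR assignment): an f64 argument placed in
    // r0/r1 is split as
    //   lo, hi = ARMISD::VMOVRRD f64:Arg;  copy lo -> R0, hi -> R1
    // and any half with no register left is stored to the stack instead.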
1300    if (VA.needsCustom()) {
1301      if (VA.getLocVT() == MVT::v2f64) {
1302        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1303                                  DAG.getConstant(0, MVT::i32));
1304        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1305                                  DAG.getConstant(1, MVT::i32));
1306
1307        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1308                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1309
1310        VA = ArgLocs[++i]; // skip ahead to next loc
1311        if (VA.isRegLoc()) {
1312          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1313                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1314        } else {
1315          assert(VA.isMemLoc());
1316
1317          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1318                                                 dl, DAG, VA, Flags));
1319        }
1320      } else {
1321        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1322                         StackPtr, MemOpChains, Flags);
1323      }
1324    } else if (VA.isRegLoc()) {
1325      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1326    } else if (isByVal) {
1327      assert(VA.isMemLoc());
1328      unsigned offset = 0;
1329
1330      // True if this byval aggregate will be split between registers
1331      // and memory.
1332      if (CCInfo.isFirstByValRegValid()) {
1333        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1334        unsigned int i, j;
1335        for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) {
1336          SDValue Const = DAG.getConstant(4*i, MVT::i32);
1337          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1338          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1339                                     MachinePointerInfo(),
1340                                     false, false, 0);
1341          MemOpChains.push_back(Load.getValue(1));
1342          RegsToPass.push_back(std::make_pair(j, Load));
1343        }
1344        offset = ARM::R4 - CCInfo.getFirstByValReg();
1345        CCInfo.clearFirstByValReg();
1346      }
1347
1348      unsigned LocMemOffset = VA.getLocMemOffset();
1349      SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
1350      SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
1351                                StkPtrOff);
1352      SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
1353      SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
1354      SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
1355                                         MVT::i32);
1356      // TODO: Disable AlwaysInline when it becomes possible
1357      //       to emit a nested call sequence.
1358      MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
1359                                          Flags.getByValAlign(),
1360                                          /*isVolatile=*/false,
1361                                          /*AlwaysInline=*/true,
1362                                          MachinePointerInfo(0),
1363                                          MachinePointerInfo(0)));
1364
1365    } else if (!IsSibCall) {
1366      assert(VA.isMemLoc());
1367
1368      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1369                                             dl, DAG, VA, Flags));
1370    }
1371  }
1372
1373  if (!MemOpChains.empty())
1374    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1375                        &MemOpChains[0], MemOpChains.size());
1376
1377  // Build a sequence of copy-to-reg nodes chained together with token chain
1378  // and flag operands which copy the outgoing args into the appropriate regs.
1379  SDValue InFlag;
1380  // Tail call byval lowering might overwrite argument registers so in case of
1381  // tail call optimization the copies to registers are lowered later.
1382  if (!isTailCall)
1383    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1384      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1385                               RegsToPass[i].second, InFlag);
1386      InFlag = Chain.getValue(1);
1387    }
1388
1389  // For tail calls lower the arguments to the 'real' stack slot.
1390  if (isTailCall) {
1391    // Force all the incoming stack arguments to be loaded from the stack
1392    // before any new outgoing arguments are stored to the stack, because the
1393    // outgoing stack slots may alias the incoming argument stack slots, and
1394    // the alias isn't otherwise explicit. This is slightly more conservative
1395    // than necessary, because it means that each store effectively depends
1396    // on every argument instead of just those arguments it would clobber.
1397
1398    // Do not glue the preceding CopyToReg nodes together with the nodes
1399    // that follow.
1399    InFlag = SDValue();
1400    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1401      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1402                               RegsToPass[i].second, InFlag);
1403      InFlag = Chain.getValue(1);
1404    }
1405    InFlag = SDValue();
1406  }
1407
1408  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1409  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1410  // node so that legalize doesn't hack it.
1411  bool isDirect = false;
1412  bool isARMFunc = false;
1413  bool isLocalARMFunc = false;
1414  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1415
1416  if (EnableARMLongCalls) {
1417    assert(getTargetMachine().getRelocationModel() == Reloc::Static &&
1418           "long-calls with non-static relocation model!");
1419    // Handle a global address or an external symbol. If it's not one of
1420    // those, the target's already in a register, so we don't need to do
1421    // anything extra.
1422    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1423      const GlobalValue *GV = G->getGlobal();
1424      // Create a constant pool entry for the callee address
1425      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
1426      ARMConstantPoolValue *CPV =
1427        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
1428
1429      // Get the address of the callee into a register
1430      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1431      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1432      Callee = DAG.getLoad(getPointerTy(), dl,
1433                           DAG.getEntryNode(), CPAddr,
1434                           MachinePointerInfo::getConstantPool(),
1435                           false, false, 0);
1436    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
1437      const char *Sym = S->getSymbol();
1438
1439      // Create a constant pool entry for the callee address
1440      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
1441      ARMConstantPoolValue *CPV =
1442        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
1443                                      ARMPCLabelIndex, 0);
1444      // Get the address of the callee into a register
1445      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1446      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1447      Callee = DAG.getLoad(getPointerTy(), dl,
1448                           DAG.getEntryNode(), CPAddr,
1449                           MachinePointerInfo::getConstantPool(),
1450                           false, false, 0);
1451    }
1452  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1453    const GlobalValue *GV = G->getGlobal();
1454    isDirect = true;
1455    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
1456    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
1457                   getTargetMachine().getRelocationModel() != Reloc::Static;
1458    isARMFunc = !Subtarget->isThumb() || isStub;
1459    // ARM call to a local ARM function is predicable.
1460    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
1461    // tBX takes a register source operand.
1462    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
1463      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
1464      ARMConstantPoolValue *CPV =
1465        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4);
1466      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1467      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1468      Callee = DAG.getLoad(getPointerTy(), dl,
1469                           DAG.getEntryNode(), CPAddr,
1470                           MachinePointerInfo::getConstantPool(),
1471                           false, false, 0);
1472      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1473      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
1474                           getPointerTy(), Callee, PICLabel);
1475    } else {
1476      // On ELF targets for PIC code, direct calls should go through the PLT
1477      unsigned OpFlags = 0;
1478      if (Subtarget->isTargetELF() &&
1479          getTargetMachine().getRelocationModel() == Reloc::PIC_)
1480        OpFlags = ARMII::MO_PLT;
1481      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
1482    }
1483  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1484    isDirect = true;
1485    bool isStub = Subtarget->isTargetDarwin() &&
1486                  getTargetMachine().getRelocationModel() != Reloc::Static;
1487    isARMFunc = !Subtarget->isThumb() || isStub;
1488    // tBX takes a register source operand.
1489    const char *Sym = S->getSymbol();
1490    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
1491      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
1492      ARMConstantPoolValue *CPV =
1493        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
1494                                      ARMPCLabelIndex, 4);
1495      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
1496      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
1497      Callee = DAG.getLoad(getPointerTy(), dl,
1498                           DAG.getEntryNode(), CPAddr,
1499                           MachinePointerInfo::getConstantPool(),
1500                           false, false, 0);
1501      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1502      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
1503                           getPointerTy(), Callee, PICLabel);
1504    } else {
1505      unsigned OpFlags = 0;
1506      // On ELF targets for PIC code, direct calls should go through the PLT
1507      if (Subtarget->isTargetELF() &&
1508          getTargetMachine().getRelocationModel() == Reloc::PIC_)
1509        OpFlags = ARMII::MO_PLT;
1510      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
1511    }
1512  }
1513
1514  // FIXME: handle tail calls differently.
1515  unsigned CallOpc;
1516  if (Subtarget->isThumb()) {
1517    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
1518      CallOpc = ARMISD::CALL_NOLINK;
1519    else
1520      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
1521  } else {
1522    CallOpc = (isDirect || Subtarget->hasV5TOps())
1523      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
1524      : ARMISD::CALL_NOLINK;
1525  }
1526
1527  std::vector<SDValue> Ops;
1528  Ops.push_back(Chain);
1529  Ops.push_back(Callee);
1530
1531  // Add argument registers to the end of the list so that they are known live
1532  // into the call.
1533  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1534    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1535                                  RegsToPass[i].second.getValueType()));
1536
1537  if (InFlag.getNode())
1538    Ops.push_back(InFlag);
1539
1540  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1541  if (isTailCall)
1542    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
1543
1544  // Returns a chain and a flag for retval copy to use.
1545  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
1546  InFlag = Chain.getValue(1);
1547
1548  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1549                             DAG.getIntPtrConstant(0, true), InFlag);
1550  if (!Ins.empty())
1551    InFlag = Chain.getValue(1);
1552
1553  // Handle result values, copying them out of physregs into vregs that we
1554  // return.
1555  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
1556                         dl, DAG, InVals);
1557}
1558
1559/// HandleByVal - Every parameter *after* a byval parameter is passed
1560/// on the stack.  Remember the next parameter register to allocate,
1561/// and then confiscate the rest of the parameter registers to ensure
1562/// this.
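///
/// For example (illustrative only): if a byval aggregate is assigned r1 as
/// its first register, r1-r3 carry its leading 12 bytes, the loop below
/// marks any remaining GPRs allocated, and every subsequent parameter is
/// forced to the stack.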
1563void
1564llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
1565  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
1566  assert((State->getCallOrPrologue() == Prologue ||
1567          State->getCallOrPrologue() == Call) &&
1568         "unhandled ParmContext");
1569  if ((!State->isFirstByValRegValid()) &&
1570      (ARM::R0 <= reg) && (reg <= ARM::R3)) {
1571    State->setFirstByValReg(reg);
1572    // At a call site, a byval parameter that is split between
1573    // registers and memory needs its size truncated here.  In a
1574    // function prologue, such byval parameters are reassembled in
1575    // memory, and are not truncated.
1576    if (State->getCallOrPrologue() == Call) {
1577      unsigned excess = 4 * (ARM::R4 - reg);
1578      assert(size >= excess && "expected larger existing stack allocation");
1579      size -= excess;
1580    }
1581  }
1582  // Confiscate any remaining parameter registers to preclude their
1583  // assignment to subsequent parameters.
1584  while (State->AllocateReg(GPRArgRegs, 4))
1585    ;
1586}
1587
1588/// MatchingStackOffset - Return true if the given stack call argument is
1589/// already available in the same position (relatively) of the caller's
1590/// incoming argument stack.
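///
/// Informally: an outgoing argument that is just a load of (or a CopyFromReg
/// ultimately fed by a load of) the caller's own fixed stack slot, at the
/// same offset and with the same size, needs no store for the tail call.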
1591static
1592bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
1593                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
1594                         const ARMInstrInfo *TII) {
1595  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
1596  int FI = INT_MAX;
1597  if (Arg.getOpcode() == ISD::CopyFromReg) {
1598    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
1599    if (!TargetRegisterInfo::isVirtualRegister(VR))
1600      return false;
1601    MachineInstr *Def = MRI->getVRegDef(VR);
1602    if (!Def)
1603      return false;
1604    if (!Flags.isByVal()) {
1605      if (!TII->isLoadFromStackSlot(Def, FI))
1606        return false;
1607    } else {
1608      return false;
1609    }
1610  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
1611    if (Flags.isByVal())
1612      // ByVal argument is passed in as a pointer but it's now being
1613      // dereferenced. e.g.
1614      // define @foo(%struct.X* %A) {
1615      //   tail call @bar(%struct.X* byval %A)
1616      // }
1617      return false;
1618    SDValue Ptr = Ld->getBasePtr();
1619    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
1620    if (!FINode)
1621      return false;
1622    FI = FINode->getIndex();
1623  } else
1624    return false;
1625
1626  assert(FI != INT_MAX);
1627  if (!MFI->isFixedObjectIndex(FI))
1628    return false;
1629  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
1630}
1631
1632/// IsEligibleForTailCallOptimization - Check whether the call is eligible
1633/// for tail call optimization. Targets which want to do tail call
1634/// optimization should implement this function.
1635bool
1636ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1637                                                     CallingConv::ID CalleeCC,
1638                                                     bool isVarArg,
1639                                                     bool isCalleeStructRet,
1640                                                     bool isCallerStructRet,
1641                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
1642                                    const SmallVectorImpl<SDValue> &OutVals,
1643                                    const SmallVectorImpl<ISD::InputArg> &Ins,
1644                                                     SelectionDAG& DAG) const {
1645  const Function *CallerF = DAG.getMachineFunction().getFunction();
1646  CallingConv::ID CallerCC = CallerF->getCallingConv();
1647  bool CCMatch = CallerCC == CalleeCC;
1648
1649  // Look for obvious safe cases to perform tail call optimization that do not
1650  // require ABI changes. This is what gcc calls sibcall.
1651
1652  // Do not sibcall optimize vararg calls unless the call site is not passing
1653  // any arguments.
1654  if (isVarArg && !Outs.empty())
1655    return false;
1656
1657  // Also avoid sibcall optimization if either caller or callee uses struct
1658  // return semantics.
1659  if (isCalleeStructRet || isCallerStructRet)
1660    return false;
1661
1662  // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
1663  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
1664  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
1665  // support in the assembler and linker to be used. This would need to be
1666  // fixed to fully support tail calls in Thumb1.
1667  //
1668  // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
1669// LR.  This means if we need to reload LR, it takes an extra instruction,
1670  // which outweighs the value of the tail call; but here we don't know yet
1671  // whether LR is going to be used.  Probably the right approach is to
1672  // generate the tail call here and turn it back into CALL/RET in
1673  // emitEpilogue if LR is used.
1674
1675  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
1676  // but we need to make sure there are enough registers; the only valid
1677  // registers are the 4 used for parameters.  We don't currently do this
1678  // case.
1679  if (Subtarget->isThumb1Only())
1680    return false;
1681
1682  // If the calling conventions do not match, then we'd better make sure the
1683  // results are returned in the same way as what the caller expects.
1684  if (!CCMatch) {
1685    SmallVector<CCValAssign, 16> RVLocs1;
1686    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
1687                       getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
1688    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
1689
1690    SmallVector<CCValAssign, 16> RVLocs2;
1691    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
1692                       getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
1693    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
1694
1695    if (RVLocs1.size() != RVLocs2.size())
1696      return false;
1697    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1698      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1699        return false;
1700      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1701        return false;
1702      if (RVLocs1[i].isRegLoc()) {
1703        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1704          return false;
1705      } else {
1706        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1707          return false;
1708      }
1709    }
1710  }
1711
1712  // If the callee takes no arguments then go on to check the results of the
1713  // call.
1714  if (!Outs.empty()) {
1715    // Check if stack adjustment is needed. For now, do not do this if any
1716    // argument is passed on the stack.
1717    SmallVector<CCValAssign, 16> ArgLocs;
1718    ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
1719                      getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
1720    CCInfo.AnalyzeCallOperands(Outs,
1721                               CCAssignFnForNode(CalleeCC, false, isVarArg));
1722    if (CCInfo.getNextStackOffset()) {
1723      MachineFunction &MF = DAG.getMachineFunction();
1724
1725      // Check if the arguments are already laid out in the right way as
1726      // the caller's fixed stack objects.
1727      MachineFrameInfo *MFI = MF.getFrameInfo();
1728      const MachineRegisterInfo *MRI = &MF.getRegInfo();
1729      const ARMInstrInfo *TII =
1730        ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
1731      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1732           i != e;
1733           ++i, ++realArgIdx) {
1734        CCValAssign &VA = ArgLocs[i];
1735        EVT RegVT = VA.getLocVT();
1736        SDValue Arg = OutVals[realArgIdx];
1737        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1738        if (VA.getLocInfo() == CCValAssign::Indirect)
1739          return false;
1740        if (VA.needsCustom()) {
1741          // f64 and vector types are split into multiple registers or
1742          // register/stack-slot combinations.  The types will not match
1743          // the registers; give up on memory f64 refs until we figure
1744          // out what to do about this.
1745          if (!VA.isRegLoc())
1746            return false;
1747          if (!ArgLocs[++i].isRegLoc())
1748            return false;
1749          if (RegVT == MVT::v2f64) {
1750            if (!ArgLocs[++i].isRegLoc())
1751              return false;
1752            if (!ArgLocs[++i].isRegLoc())
1753              return false;
1754          }
1755        } else if (!VA.isRegLoc()) {
1756          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
1757                                   MFI, MRI, TII))
1758            return false;
1759        }
1760      }
1761    }
1762  }
1763
1764  return true;
1765}
1766
1767SDValue
1768ARMTargetLowering::LowerReturn(SDValue Chain,
1769                               CallingConv::ID CallConv, bool isVarArg,
1770                               const SmallVectorImpl<ISD::OutputArg> &Outs,
1771                               const SmallVectorImpl<SDValue> &OutVals,
1772                               DebugLoc dl, SelectionDAG &DAG) const {
1773
1774  // CCValAssign - represent the assignment of the return value to a location.
1775  SmallVector<CCValAssign, 16> RVLocs;
1776
1777  // CCState - Info about the registers and stack slots.
1778  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1779                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
1780
1781  // Analyze outgoing return values.
1782  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
1783                                               isVarArg));
1784
1785  // If this is the first return lowered for this function, add
1786  // the regs to the liveout set for the function.
1787  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1788    for (unsigned i = 0; i != RVLocs.size(); ++i)
1789      if (RVLocs[i].isRegLoc())
1790        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1791  }
1792
1793  SDValue Flag;
1794
1795  // Copy the result values into the output registers.
1796  for (unsigned i = 0, realRVLocIdx = 0;
1797       i != RVLocs.size();
1798       ++i, ++realRVLocIdx) {
1799    CCValAssign &VA = RVLocs[i];
1800    assert(VA.isRegLoc() && "Can only return in registers!");
1801
1802    SDValue Arg = OutVals[realRVLocIdx];
1803
1804    switch (VA.getLocInfo()) {
1805    default: llvm_unreachable("Unknown loc info!");
1806    case CCValAssign::Full: break;
1807    case CCValAssign::BCvt:
1808      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1809      break;
1810    }
1811
1812    if (VA.needsCustom()) {
1813      if (VA.getLocVT() == MVT::v2f64) {
1814        // Extract the first half and return it in two registers.
1815        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1816                                   DAG.getConstant(0, MVT::i32));
1817        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
1818                                       DAG.getVTList(MVT::i32, MVT::i32), Half);
1819
1820        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
1821        Flag = Chain.getValue(1);
1822        VA = RVLocs[++i]; // skip ahead to next loc
1823        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1824                                 HalfGPRs.getValue(1), Flag);
1825        Flag = Chain.getValue(1);
1826        VA = RVLocs[++i]; // skip ahead to next loc
1827
1828        // Extract the 2nd half and fall through to handle it as an f64 value.
1829        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1830                          DAG.getConstant(1, MVT::i32));
1831      }
1832      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
1833      // available.
1834      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1835                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
1836      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
1837      Flag = Chain.getValue(1);
1838      VA = RVLocs[++i]; // skip ahead to next loc
1839      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
1840                               Flag);
1841    } else
1842      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1843
1844    // Guarantee that all emitted copies are glued together so the
1845    // scheduler cannot separate them.
1846    Flag = Chain.getValue(1);
1847  }
1848
1849  SDValue result;
1850  if (Flag.getNode())
1851    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1852  else // Return Void
1853    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
1854
1855  return result;
1856}
1857
1858bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
1859  if (N->getNumValues() != 1)
1860    return false;
1861  if (!N->hasNUsesOfValue(1, 0))
1862    return false;
1863
1864  unsigned NumCopies = 0;
1865  SDNode* Copies[2];
1866  SDNode *Use = *N->use_begin();
1867  if (Use->getOpcode() == ISD::CopyToReg) {
1868    Copies[NumCopies++] = Use;
1869  } else if (Use->getOpcode() == ARMISD::VMOVRRD) {
1870    // f64 returned in a pair of GPRs.
1871    for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end();
1872         UI != UE; ++UI) {
1873      if (UI->getOpcode() != ISD::CopyToReg)
1874        return false;
1875      Copies[UI.getUse().getResNo()] = *UI;
1876      ++NumCopies;
1877    }
1878  } else if (Use->getOpcode() == ISD::BITCAST) {
1879    // f32 returned in a single GPR.
1880    if (!Use->hasNUsesOfValue(1, 0))
1881      return false;
1882    Use = *Use->use_begin();
1883    if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0))
1884      return false;
1885    Copies[NumCopies++] = Use;
1886  } else {
1887    return false;
1888  }
1889
1890  if (NumCopies != 1 && NumCopies != 2)
1891    return false;
1892
1893  bool HasRet = false;
1894  for (unsigned i = 0; i < NumCopies; ++i) {
1895    SDNode *Copy = Copies[i];
1896    for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
1897         UI != UE; ++UI) {
1898      if (UI->getOpcode() == ISD::CopyToReg) {
1899        SDNode *Use = *UI;
1900        if (Use == Copies[0] || Use == Copies[1])
1901          continue;
1902        return false;
1903      }
1904      if (UI->getOpcode() != ARMISD::RET_FLAG)
1905        return false;
1906      HasRet = true;
1907    }
1908  }
1909
1910  return HasRet;
1911}
1912
1913bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
1914  if (!EnableARMTailCalls)
1915    return false;
1916
1917  if (!CI->isTailCall())
1918    return false;
1919
1920  return !Subtarget->isThumb1Only();
1921}
1922
1923// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
1924// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
1925// one of the above mentioned nodes. It has to be wrapped because otherwise
1926// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
1927// be used to form addressing mode. These wrapped nodes will be selected
1928// into MOVi.
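// Sketch: LowerConstantPool below, for instance, produces
//   ARMISD::Wrapper(TargetConstantPool<C>)
// and instruction selection then materializes the wrapped address.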
1929static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
1930  EVT PtrVT = Op.getValueType();
1931  // FIXME there is no actual debug info here
1932  DebugLoc dl = Op.getDebugLoc();
1933  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1934  SDValue Res;
1935  if (CP->isMachineConstantPoolEntry())
1936    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
1937                                    CP->getAlignment());
1938  else
1939    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
1940                                    CP->getAlignment());
1941  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
1942}
1943
1944unsigned ARMTargetLowering::getJumpTableEncoding() const {
1945  return MachineJumpTableInfo::EK_Inline;
1946}
1947
1948SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
1949                                             SelectionDAG &DAG) const {
1950  MachineFunction &MF = DAG.getMachineFunction();
1951  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1952  unsigned ARMPCLabelIndex = 0;
1953  DebugLoc DL = Op.getDebugLoc();
1954  EVT PtrVT = getPointerTy();
1955  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1956  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1957  SDValue CPAddr;
1958  if (RelocM == Reloc::Static) {
1959    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
1960  } else {
1961    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
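    // (In ARM state the PC reads as the instruction address plus 8; in Thumb
    // state it reads as the address plus 4, hence the pc-relative adjustment.)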
1962    ARMPCLabelIndex = AFI->createPICLabelUId();
1963    ARMConstantPoolValue *CPV =
1964      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
1965                                      ARMCP::CPBlockAddress, PCAdj);
1966    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1967  }
1968  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
1969  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
1970                               MachinePointerInfo::getConstantPool(),
1971                               false, false, 0);
1972  if (RelocM == Reloc::Static)
1973    return Result;
1974  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1975  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
1976}
1977
1978// Lower ISD::GlobalTLSAddress using the "general dynamic" model
1979SDValue
1980ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1981                                                 SelectionDAG &DAG) const {
1982  DebugLoc dl = GA->getDebugLoc();
1983  EVT PtrVT = getPointerTy();
1984  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
1985  MachineFunction &MF = DAG.getMachineFunction();
1986  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1987  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
1988  ARMConstantPoolValue *CPV =
1989    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
1990                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
1991  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
1992  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
1993  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
1994                         MachinePointerInfo::getConstantPool(),
1995                         false, false, 0);
1996  SDValue Chain = Argument.getValue(1);
1997
1998  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
1999  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
2000
2001  // call __tls_get_addr.
2002  ArgListTy Args;
2003  ArgListEntry Entry;
2004  Entry.Node = Argument;
2005  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
2006  Args.push_back(Entry);
2007  // FIXME: is there useful debug info available here?
2008  std::pair<SDValue, SDValue> CallResult =
2009    LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()),
2010                false, false, false, false,
2011                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
2012                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
2013  return CallResult.first;
2014}
2015
2016// Lower ISD::GlobalTLSAddress using the "initial exec" or
2017// "local exec" model.
2018SDValue
2019ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
2020                                        SelectionDAG &DAG) const {
2021  const GlobalValue *GV = GA->getGlobal();
2022  DebugLoc dl = GA->getDebugLoc();
2023  SDValue Offset;
2024  SDValue Chain = DAG.getEntryNode();
2025  EVT PtrVT = getPointerTy();
2026  // Get the Thread Pointer
2027  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2028
2029  if (GV->isDeclaration()) {
2030    MachineFunction &MF = DAG.getMachineFunction();
2031    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2032    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2033    // Initial exec model.
2034    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2035    ARMConstantPoolValue *CPV =
2036      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2037                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
2038                                      true);
2039    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2040    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2041    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
2042                         MachinePointerInfo::getConstantPool(),
2043                         false, false, 0);
2044    Chain = Offset.getValue(1);
2045
2046    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2047    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
2048
2049    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
2050                         MachinePointerInfo::getConstantPool(),
2051                         false, false, 0);
2052  } else {
2053    // local exec model
2054    ARMConstantPoolValue *CPV =
2055      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
2056    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2057    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2058    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
2059                         MachinePointerInfo::getConstantPool(),
2060                         false, false, 0);
2061  }
2062
2063  // The address of the thread local variable is the add of the thread
2064  // pointer with the offset of the variable.
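  // E.g. (sketch): addr = TP + offset, where TP is ARMISD::THREAD_POINTER
  // and offset is either the value loaded via GOTTPOFF (initial exec) or
  // the link-time TPOFF constant (local exec).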
2065  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
2066}
2067
2068SDValue
2069ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
2070  // TODO: implement the "local dynamic" model
2071  assert(Subtarget->isTargetELF() &&
2072         "TLS not implemented for non-ELF targets");
2073  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2074  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
2075  // otherwise use the "Local Exec" TLS Model
2076  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
2077    return LowerToTLSGeneralDynamicModel(GA, DAG);
2078  else
2079    return LowerToTLSExecModels(GA, DAG);
2080}
2081
2082SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
2083                                                 SelectionDAG &DAG) const {
2084  EVT PtrVT = getPointerTy();
2085  DebugLoc dl = Op.getDebugLoc();
2086  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2087  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2088  if (RelocM == Reloc::PIC_) {
2089    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
2090    ARMConstantPoolValue *CPV =
2091      ARMConstantPoolConstant::Create(GV,
2092                                      UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
2093    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2094    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2095    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
2096                                 CPAddr,
2097                                 MachinePointerInfo::getConstantPool(),
2098                                 false, false, 0);
2099    SDValue Chain = Result.getValue(1);
2100    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
2101    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
2102    if (!UseGOTOFF)
2103      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
2104                           MachinePointerInfo::getGOT(), false, false, 0);
2105    return Result;
2106  }
2107
2108  // If we have T2 ops, we can materialize the address directly via movt/movw
2109  // pair. This is always cheaper.
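  // E.g. (roughly):
  //   movw r0, :lower16:sym
  //   movt r0, :upper16:sym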
2110  if (Subtarget->useMovt()) {
2111    ++NumMovwMovt;
2112    // FIXME: Once remat is capable of dealing with instructions with register
2113    // operands, expand this into two nodes.
2114    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
2115                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
2116  } else {
2117    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
2118    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2119    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2120                       MachinePointerInfo::getConstantPool(),
2121                       false, false, 0);
2122  }
2123}
2124
2125SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
2126                                                    SelectionDAG &DAG) const {
2127  EVT PtrVT = getPointerTy();
2128  DebugLoc dl = Op.getDebugLoc();
2129  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2130  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2131  MachineFunction &MF = DAG.getMachineFunction();
2132  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2133
2134  // FIXME: Enable this for static codegen when tool issues are fixed.
2135  if (Subtarget->useMovt() && RelocM != Reloc::Static) {
2136    ++NumMovwMovt;
2137    // FIXME: Once remat is capable of dealing with instructions with register
2138    // operands, expand this into two nodes.
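    // Note: given the RelocM != Reloc::Static guard above, the static branch
    // just below is currently unreachable; see the FIXME about enabling this
    // path for static codegen.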
2139    if (RelocM == Reloc::Static)
2140      return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
2141                         DAG.getTargetGlobalAddress(GV, dl, PtrVT));
2142
2143    unsigned Wrapper = (RelocM == Reloc::PIC_)
2144      ? ARMISD::WrapperPIC : ARMISD::WrapperDYN;
2145    SDValue Result = DAG.getNode(Wrapper, dl, PtrVT,
2146                                 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
2147    if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
2148      Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
2149                           MachinePointerInfo::getGOT(), false, false, 0);
2150    return Result;
2151  }
2152
2153  unsigned ARMPCLabelIndex = 0;
2154  SDValue CPAddr;
2155  if (RelocM == Reloc::Static) {
2156    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
2157  } else {
2158    ARMPCLabelIndex = AFI->createPICLabelUId();
2159    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
2160    ARMConstantPoolValue *CPV =
2161      ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue,
2162                                      PCAdj);
2163    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2164  }
2165  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2166
2167  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2168                               MachinePointerInfo::getConstantPool(),
2169                               false, false, 0);
2170  SDValue Chain = Result.getValue(1);
2171
2172  if (RelocM == Reloc::PIC_) {
2173    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2174    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2175  }
2176
2177  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
2178    Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
2179                         false, false, 0);
2180
2181  return Result;
2182}
2183
2184SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
2185                                                    SelectionDAG &DAG) const {
2186  assert(Subtarget->isTargetELF() &&
2187         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
2188  MachineFunction &MF = DAG.getMachineFunction();
2189  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2190  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2191  EVT PtrVT = getPointerTy();
2192  DebugLoc dl = Op.getDebugLoc();
2193  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2194  ARMConstantPoolValue *CPV =
2195    ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
2196                                  ARMPCLabelIndex, PCAdj);
2197  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2198  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2199  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2200                               MachinePointerInfo::getConstantPool(),
2201                               false, false, 0);
2202  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2203  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2204}
2205
2206SDValue
2207ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG)
2208  const {
2209  DebugLoc dl = Op.getDebugLoc();
2210  return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
2211                     Op.getOperand(0), Op.getOperand(1));
2212}
2213
2214SDValue
2215ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
2216  DebugLoc dl = Op.getDebugLoc();
2217  SDValue Val = DAG.getConstant(0, MVT::i32);
2218  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
2219                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
2220                     Op.getOperand(1), Val);
2221}
2222
2223SDValue
2224ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
2225  DebugLoc dl = Op.getDebugLoc();
2226  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
2227                     Op.getOperand(1), DAG.getConstant(0, MVT::i32));
2228}
2229
2230SDValue
2231ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
2232                                          const ARMSubtarget *Subtarget) const {
2233  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2234  DebugLoc dl = Op.getDebugLoc();
2235  switch (IntNo) {
2236  default: return SDValue();    // Don't custom lower most intrinsics.
2237  case Intrinsic::arm_thread_pointer: {
2238    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2239    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2240  }
2241  case Intrinsic::eh_sjlj_lsda: {
2242    MachineFunction &MF = DAG.getMachineFunction();
2243    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2244    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2245    EVT PtrVT = getPointerTy();
2246    DebugLoc dl = Op.getDebugLoc();
2247    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2248    SDValue CPAddr;
2249    unsigned PCAdj = (RelocM != Reloc::PIC_)
2250      ? 0 : (Subtarget->isThumb() ? 4 : 8);
2251    ARMConstantPoolValue *CPV =
2252      ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
2253                                      ARMCP::CPLSDA, PCAdj);
2254    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2255    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2256    SDValue Result =
2257      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2258                  MachinePointerInfo::getConstantPool(),
2259                  false, false, 0);
2260
2261    if (RelocM == Reloc::PIC_) {
2262      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2263      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2264    }
2265    return Result;
2266  }
2267  case Intrinsic::arm_neon_vmulls:
2268  case Intrinsic::arm_neon_vmullu: {
2269    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
2270      ? ARMISD::VMULLs : ARMISD::VMULLu;
2271    return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(),
2272                       Op.getOperand(1), Op.getOperand(2));
2273  }
2274  }
2275}
2276
2277static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
2278                               const ARMSubtarget *Subtarget) {
2279  DebugLoc dl = Op.getDebugLoc();
2280  if (!Subtarget->hasDataBarrier()) {
2281    // Some ARMv6 CPUs can support data barriers with an mcr instruction.
2282    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
2283    // here.
2284    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
2285           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
2286    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2287                       DAG.getConstant(0, MVT::i32));
2288  }
2289
2290  SDValue Op5 = Op.getOperand(5);
2291  bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
2292  unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2293  unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2294  bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
2295
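  // Summary of the mapping below (roughly): device barriers use DMB SY/ST,
  // normal barriers the inner-shareable DMB ISH/ISHST, with the *ST forms
  // when only stores need ordering.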
2296  ARM_MB::MemBOpt DMBOpt;
2297  if (isDeviceBarrier)
2298    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
2299  else
2300    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
2301  return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2302                     DAG.getConstant(DMBOpt, MVT::i32));
2303}
2304
2305
2306static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
2307                                 const ARMSubtarget *Subtarget) {
2308  // FIXME: handle "fence singlethread" more efficiently.
2309  DebugLoc dl = Op.getDebugLoc();
2310  if (!Subtarget->hasDataBarrier()) {
2311    // Some ARMv6 cpus can support data barriers with an mcr instruction.
2312    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
2313    // here.
2314    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
2315           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
2316    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2317                       DAG.getConstant(0, MVT::i32));
2318  }
2319
2320  return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2321                     DAG.getConstant(ARM_MB::ISH, MVT::i32));
2322}
2323
2324static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2325                             const ARMSubtarget *Subtarget) {
2326  // ARM pre v5TE and Thumb1 do not have preload instructions.
2327  if (!(Subtarget->isThumb2() ||
2328        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2329    // Just preserve the chain.
2330    return Op.getOperand(0);
2331
2332  DebugLoc dl = Op.getDebugLoc();
2333  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2334  if (!isRead &&
2335      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
2336    // ARMv7 with MP extension has PLDW.
2337    return Op.getOperand(0);
2338
2339  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2340  if (Subtarget->isThumb()) {
2341    // Invert the bits.
2342    isRead = ~isRead & 1;
2343    isData = ~isData & 1;
2344  }
2345
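  // The isRead/isData operands roughly select among PLD (data read), PLDW
  // (data write, v7 MP extension), and PLI (instruction prefetch).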
2346  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
2347                     Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
2348                     DAG.getConstant(isData, MVT::i32));
2349}
2350
2351static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
2352  MachineFunction &MF = DAG.getMachineFunction();
2353  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
2354
2355  // vastart just stores the address of the VarArgsFrameIndex slot into the
2356  // memory location argument.
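  // E.g. (sketch): va_start(ap) becomes a single pointer-sized store of
  // FrameIndex<VarArgsFrameIndex> into ap.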
2357  DebugLoc dl = Op.getDebugLoc();
2358  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2359  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2360  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2361  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2362                      MachinePointerInfo(SV), false, false, 0);
2363}
2364
2365SDValue
2366ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2367                                        SDValue &Root, SelectionDAG &DAG,
2368                                        DebugLoc dl) const {
2369  MachineFunction &MF = DAG.getMachineFunction();
2370  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2371
2372  TargetRegisterClass *RC;
2373  if (AFI->isThumb1OnlyFunction())
2374    RC = ARM::tGPRRegisterClass;
2375  else
2376    RC = ARM::GPRRegisterClass;
2377
2378  // Transform the arguments stored in physical registers into virtual ones.
2379  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2380  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2381
2382  SDValue ArgValue2;
2383  if (NextVA.isMemLoc()) {
2384    MachineFrameInfo *MFI = MF.getFrameInfo();
2385    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2386
2387    // Create load node to retrieve arguments from the stack.
2388    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2389    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
2390                            MachinePointerInfo::getFixedStack(FI),
2391                            false, false, 0);
2392  } else {
2393    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2394    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2395  }
2396
2397  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
2398}
2399
2400void
2401ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
2402                                  unsigned &VARegSize, unsigned &VARegSaveSize)
2403  const {
2404  unsigned NumGPRs;
2405  if (CCInfo.isFirstByValRegValid())
2406    NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
2407  else {
2408    unsigned int firstUnalloced;
2409    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
2410                                                sizeof(GPRArgRegs) /
2411                                                sizeof(GPRArgRegs[0]));
2412    NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
2413  }
2414
2415  unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
2416  VARegSize = NumGPRs * 4;
2417  VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
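  // E.g. (sketch): three unallocated GPRs with 8-byte stack alignment give
  // VARegSize == 12 and VARegSaveSize == 16.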
2418}
2419
2420// The remaining GPRs hold either the beginning of variable-argument
2421// data, or the beginning of an aggregate passed by value (usually
2422// byval).  Either way, we allocate stack slots adjacent to the data
2423// provided by our caller, and store the unallocated registers there.
2424// If this is a variadic function, the va_list pointer will begin with
2425// these values; otherwise, this reassembles a (byval) structure that
2426// was split between registers and memory.
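// For example (illustrative): in a variadic function whose fixed arguments
// consume r0 and r1, this stores r2 and r3 into stack slots contiguous with
// the caller-provided argument area, so va_arg can walk memory linearly.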
2427void
2428ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
2429                                        DebugLoc dl, SDValue &Chain,
2430                                        unsigned ArgOffset) const {
2431  MachineFunction &MF = DAG.getMachineFunction();
2432  MachineFrameInfo *MFI = MF.getFrameInfo();
2433  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2434  unsigned firstRegToSaveIndex;
2435  if (CCInfo.isFirstByValRegValid())
2436    firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0;
2437  else {
2438    firstRegToSaveIndex = CCInfo.getFirstUnallocated
2439      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
2440  }
2441
2442  unsigned VARegSize, VARegSaveSize;
2443  computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
2444  if (VARegSaveSize) {
2445    // If this function is vararg, store any remaining integer argument regs
2446    // to their spots on the stack so that they may be loaded by
2447    // dereferencing the result of va_arg.
2448    AFI->setVarArgsRegSaveSize(VARegSaveSize);
2449    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize,
2450                                                     ArgOffset + VARegSaveSize
2451                                                     - VARegSize,
2452                                                     false));
2453    SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
2454                                    getPointerTy());
2455
2456    SmallVector<SDValue, 4> MemOps;
2457    for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
2458      TargetRegisterClass *RC;
2459      if (AFI->isThumb1OnlyFunction())
2460        RC = ARM::tGPRRegisterClass;
2461      else
2462        RC = ARM::GPRRegisterClass;
2463
2464      unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
2465      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2466      SDValue Store =
2467        DAG.getStore(Val.getValue(1), dl, Val, FIN,
2468                 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
2469                     false, false, 0);
2470      MemOps.push_back(Store);
2471      FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2472                        DAG.getConstant(4, getPointerTy()));
2473    }
2474    if (!MemOps.empty())
2475      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2476                          &MemOps[0], MemOps.size());
2477  } else
2478    // This will point to the next argument passed via stack.
2479    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
2480}
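
// Continuing the example above: r1-r3 are copied into virtual registers and
// stored into a 16-byte fixed object.  The VARegSaveSize - VARegSize bias in
// the object offset places the 12 bytes of saved register data so that they
// end (roughly) where the caller's stack arguments begin, letting a va_arg
// walk move from the register-save area into the incoming stack arguments
// without a discontinuity.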
2481
2482SDValue
2483ARMTargetLowering::LowerFormalArguments(SDValue Chain,
2484                                        CallingConv::ID CallConv, bool isVarArg,
2485                                        const SmallVectorImpl<ISD::InputArg>
2486                                          &Ins,
2487                                        DebugLoc dl, SelectionDAG &DAG,
2488                                        SmallVectorImpl<SDValue> &InVals)
2489                                          const {
2490  MachineFunction &MF = DAG.getMachineFunction();
2491  MachineFrameInfo *MFI = MF.getFrameInfo();
2492
2493  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2494
2495  // Assign locations to all of the incoming arguments.
2496  SmallVector<CCValAssign, 16> ArgLocs;
2497  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2498                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
2499  CCInfo.AnalyzeFormalArguments(Ins,
2500                                CCAssignFnForNode(CallConv, /* Return */ false,
2501                                                  isVarArg));
2502
2503  SmallVector<SDValue, 16> ArgValues;
2504  int lastInsIndex = -1;
2505
2506  SDValue ArgValue;
2507  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2508    CCValAssign &VA = ArgLocs[i];
2509
2510    // Arguments stored in registers.
2511    if (VA.isRegLoc()) {
2512      EVT RegVT = VA.getLocVT();
2513
2514      if (VA.needsCustom()) {
2515        // f64 and vector types are split up into multiple registers or
2516        // combinations of registers and stack slots.
2517        if (VA.getLocVT() == MVT::v2f64) {
2518          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
2519                                                   Chain, DAG, dl);
2520          VA = ArgLocs[++i]; // skip ahead to next loc
2521          SDValue ArgValue2;
2522          if (VA.isMemLoc()) {
2523            int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
2524            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2525            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
2526                                    MachinePointerInfo::getFixedStack(FI),
2527                                    false, false, 0);
2528          } else {
2529            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
2530                                             Chain, DAG, dl);
2531          }
2532          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2533          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
2534                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
2535          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
2536                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
2537        } else
2538          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
2539
2540      } else {
2541        TargetRegisterClass *RC;
2542
2543        if (RegVT == MVT::f32)
2544          RC = ARM::SPRRegisterClass;
2545        else if (RegVT == MVT::f64)
2546          RC = ARM::DPRRegisterClass;
2547        else if (RegVT == MVT::v2f64)
2548          RC = ARM::QPRRegisterClass;
2549        else if (RegVT == MVT::i32)
2550          RC = (AFI->isThumb1OnlyFunction() ?
2551                ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
2552        else
2553          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
2554
2555        // Transform the arguments in physical registers into virtual ones.
2556        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2557        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2558      }
2559
2560      // If this is an 8 or 16-bit value, it is really passed promoted
2561      // to 32 bits.  Insert an assert[sz]ext to capture this, then
2562      // truncate to the right size.
2563      switch (VA.getLocInfo()) {
2564      default: llvm_unreachable("Unknown loc info!");
2565      case CCValAssign::Full: break;
2566      case CCValAssign::BCvt:
2567        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2568        break;
2569      case CCValAssign::SExt:
2570        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2571                               DAG.getValueType(VA.getValVT()));
2572        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2573        break;
2574      case CCValAssign::ZExt:
2575        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2576                               DAG.getValueType(VA.getValVT()));
2577        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2578        break;
2579      }
2580
2581      InVals.push_back(ArgValue);
2582
2583    } else { // VA.isRegLoc()
2584
2585      // sanity check
2586      assert(VA.isMemLoc());
2587      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
2588
2589      int index = ArgLocs[i].getValNo();
2590
2591      // Some Ins[] entries become multiple ArgLocs[] entries.
2592      // Process them only once.
2593      if (index != lastInsIndex)
2594        {
2595          ISD::ArgFlagsTy Flags = Ins[index].Flags;
2596          // FIXME: For now, all byval parameter objects are marked mutable.
2597          // This can be changed with more analysis.
2598          // In case of tail call optimization, mark all arguments mutable,
2599          // since they could be overwritten by the lowering of arguments in
2600          // case of a tail call.
2601          if (Flags.isByVal()) {
2602            unsigned VARegSize, VARegSaveSize;
2603            computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
2604            VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0);
2605            unsigned Bytes = Flags.getByValSize() - VARegSize;
2606            if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2607            int FI = MFI->CreateFixedObject(Bytes,
2608                                            VA.getLocMemOffset(), false);
2609            InVals.push_back(DAG.getFrameIndex(FI, getPointerTy()));
2610          } else {
2611            int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
2612                                            VA.getLocMemOffset(), true);
2613
2614            // Create load nodes to retrieve arguments from the stack.
2615            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2616            InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
2617                                         MachinePointerInfo::getFixedStack(FI),
2618                                         false, false, 0));
2619          }
2620          lastInsIndex = index;
2621        }
2622    }
2623  }
2624
2625  // varargs
2626  if (isVarArg)
2627    VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset());
2628
2629  return Chain;
2630}
2631
2632/// isFloatingPointZero - Return true if this is +0.0.
2633static bool isFloatingPointZero(SDValue Op) {
2634  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
2635    return CFP->getValueAPF().isPosZero();
2636  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
2637    // Maybe this has already been legalized into the constant pool?
2638    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
2639      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
2640      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
2641        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
2642          return CFP->getValueAPF().isPosZero();
2643    }
2644  }
2645  return false;
2646}
2647
2648/// Returns an appropriate ARM CMP (cmp) and the corresponding condition code
2649/// for the given operands.
2650SDValue
2651ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2652                             SDValue &ARMcc, SelectionDAG &DAG,
2653                             DebugLoc dl) const {
2654  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
2655    unsigned C = RHSC->getZExtValue();
2656    if (!isLegalICmpImmediate(C)) {
2657      // Constant does not fit, try adjusting it by one?
2658      switch (CC) {
2659      default: break;
2660      case ISD::SETLT:
2661      case ISD::SETGE:
2662        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
2663          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2664          RHS = DAG.getConstant(C-1, MVT::i32);
2665        }
2666        break;
2667      case ISD::SETULT:
2668      case ISD::SETUGE:
2669        if (C != 0 && isLegalICmpImmediate(C-1)) {
2670          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2671          RHS = DAG.getConstant(C-1, MVT::i32);
2672        }
2673        break;
2674      case ISD::SETLE:
2675      case ISD::SETGT:
2676        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
2677          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2678          RHS = DAG.getConstant(C+1, MVT::i32);
2679        }
2680        break;
2681      case ISD::SETULE:
2682      case ISD::SETUGT:
2683        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
2684          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2685          RHS = DAG.getConstant(C+1, MVT::i32);
2686        }
2687        break;
2688      }
2689    }
2690  }
2691
2692  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2693  ARMISD::NodeType CompareType;
2694  switch (CondCode) {
2695  default:
2696    CompareType = ARMISD::CMP;
2697    break;
2698  case ARMCC::EQ:
2699  case ARMCC::NE:
2700    // Uses only Z Flag
2701    CompareType = ARMISD::CMPZ;
2702    break;
2703  }
2704  ARMcc = DAG.getConstant(CondCode, MVT::i32);
2705  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
2706}
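
// Example of the adjustment above: 0x101 is not a valid ARM modified
// immediate (it needs nine significant bits) but 0x100 is, so
//   (setlt x, 0x101)  becomes  (setle x, 0x100)
// which selects to a single 'cmp rN, #256' instead of first materializing
// the constant in a register.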
2707
2708/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2709SDValue
2710ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
2711                             DebugLoc dl) const {
2712  SDValue Cmp;
2713  if (!isFloatingPointZero(RHS))
2714    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
2715  else
2716    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
2717  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
2718}
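
// E.g. an f64 compare becomes roughly
//   vcmpe.f64 d0, d1        ; or a compare against #0.0 when RHS is +0.0
//   vmrs APSR_nzcv, fpscr   ; FMSTAT: copy the VFP flags into CPSR
// after which ordinary predicated instructions can test the result.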
2719
2720/// duplicateCmp - Glue values can have only one use, so this function
2721/// duplicates a comparison node.
2722SDValue
2723ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
2724  unsigned Opc = Cmp.getOpcode();
2725  DebugLoc DL = Cmp.getDebugLoc();
2726  if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
2727    return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
2728
2729  assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
2730  Cmp = Cmp.getOperand(0);
2731  Opc = Cmp.getOpcode();
2732  if (Opc == ARMISD::CMPFP)
2733    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
2734  else {
2735    assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
2736    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
2737  }
2738  return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
2739}
2740
2741SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2742  SDValue Cond = Op.getOperand(0);
2743  SDValue SelectTrue = Op.getOperand(1);
2744  SDValue SelectFalse = Op.getOperand(2);
2745  DebugLoc dl = Op.getDebugLoc();
2746
2747  // Convert:
2748  //
2749  //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
2750  //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
2751  //
2752  if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
2753    const ConstantSDNode *CMOVTrue =
2754      dyn_cast<ConstantSDNode>(Cond.getOperand(0));
2755    const ConstantSDNode *CMOVFalse =
2756      dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2757
2758    if (CMOVTrue && CMOVFalse) {
2759      unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
2760      unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
2761
2762      SDValue True;
2763      SDValue False;
2764      if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
2765        True = SelectTrue;
2766        False = SelectFalse;
2767      } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
2768        True = SelectFalse;
2769        False = SelectTrue;
2770      }
2771
2772      if (True.getNode() && False.getNode()) {
2773        EVT VT = Op.getValueType();
2774        SDValue ARMcc = Cond.getOperand(2);
2775        SDValue CCR = Cond.getOperand(3);
2776        SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
2777        assert(True.getValueType() == VT);
2778        return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
2779      }
2780    }
2781  }
2782
2783  return DAG.getSelectCC(dl, Cond,
2784                         DAG.getConstant(0, Cond.getValueType()),
2785                         SelectTrue, SelectFalse, ISD::SETNE);
2786}
2787
2788SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2789  EVT VT = Op.getValueType();
2790  SDValue LHS = Op.getOperand(0);
2791  SDValue RHS = Op.getOperand(1);
2792  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2793  SDValue TrueVal = Op.getOperand(2);
2794  SDValue FalseVal = Op.getOperand(3);
2795  DebugLoc dl = Op.getDebugLoc();
2796
2797  if (LHS.getValueType() == MVT::i32) {
2798    SDValue ARMcc;
2799    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2800    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2801    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp);
2802  }
2803
2804  ARMCC::CondCodes CondCode, CondCode2;
2805  FPCCToARMCC(CC, CondCode, CondCode2);
2806
2807  SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2808  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2809  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2810  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
2811                               ARMcc, CCR, Cmp);
2812  if (CondCode2 != ARMCC::AL) {
2813    SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
2814    // FIXME: Needs another CMP because flag can have but one use.
2815    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
2816    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
2817                         Result, TrueVal, ARMcc2, CCR, Cmp2);
2818  }
2819  return Result;
2820}
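
// Most FP conditions map to a single ARM condition and need one CMOV, but
// e.g. SETUEQ maps to the pair (EQ, VS) and produces two chained CMOVs:
//   t = CMOV FalseVal, TrueVal, EQ    ; true if equal
//   r = CMOV t,        TrueVal, VS    ; also true if unordered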
2821
2822/// canChangeToInt - Given the fp compare operand, return true if it is suitable
2823/// to morph to an integer compare sequence.
2824static bool canChangeToInt(SDValue Op, bool &SeenZero,
2825                           const ARMSubtarget *Subtarget) {
2826  SDNode *N = Op.getNode();
2827  if (!N->hasOneUse())
2828    // Otherwise it requires moving the value from fp to integer registers.
2829    return false;
2830  if (!N->getNumValues())
2831    return false;
2832  EVT VT = Op.getValueType();
2833  if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
2834    // The f32 case is generally profitable; the f64 case only makes sense
2835    // when vcmpe + vmrs are very slow, e.g. on cortex-a8.
2836    return false;
2837
2838  if (isFloatingPointZero(Op)) {
2839    SeenZero = true;
2840    return true;
2841  }
2842  return ISD::isNormalLoad(N);
2843}
2844
2845static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
2846  if (isFloatingPointZero(Op))
2847    return DAG.getConstant(0, MVT::i32);
2848
2849  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
2850    return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2851                       Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
2852                       Ld->isVolatile(), Ld->isNonTemporal(),
2853                       Ld->getAlignment());
2854
2855  llvm_unreachable("Unknown VFP cmp argument!");
2856}
2857
2858static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
2859                           SDValue &RetVal1, SDValue &RetVal2) {
2860  if (isFloatingPointZero(Op)) {
2861    RetVal1 = DAG.getConstant(0, MVT::i32);
2862    RetVal2 = DAG.getConstant(0, MVT::i32);
2863    return;
2864  }
2865
2866  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
2867    SDValue Ptr = Ld->getBasePtr();
2868    RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2869                          Ld->getChain(), Ptr,
2870                          Ld->getPointerInfo(),
2871                          Ld->isVolatile(), Ld->isNonTemporal(),
2872                          Ld->getAlignment());
2873
2874    EVT PtrType = Ptr.getValueType();
2875    unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
2876    SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
2877                                 PtrType, Ptr, DAG.getConstant(4, PtrType));
2878    RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2879                          Ld->getChain(), NewPtr,
2880                          Ld->getPointerInfo().getWithOffset(4),
2881                          Ld->isVolatile(), Ld->isNonTemporal(),
2882                          NewAlign);
2883    return;
2884  }
2885
2886  llvm_unreachable("Unknown VFP cmp argument!");
2887}
2888
2889/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
2890/// f32 and even f64 comparisons to integer ones.
2891SDValue
2892ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
2893  SDValue Chain = Op.getOperand(0);
2894  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2895  SDValue LHS = Op.getOperand(2);
2896  SDValue RHS = Op.getOperand(3);
2897  SDValue Dest = Op.getOperand(4);
2898  DebugLoc dl = Op.getDebugLoc();
2899
2900  bool SeenZero = false;
2901  if (canChangeToInt(LHS, SeenZero, Subtarget) &&
2902      canChangeToInt(RHS, SeenZero, Subtarget) &&
2903      // If one of the operands is zero, it's safe to ignore the NaN case since
2904      // we only care about equality comparisons.
2905      (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
2906    // If unsafe fp math optimization is enabled and there are no other uses of
2907    // the CMP operands, and the condition code is EQ or NE, we can optimize it
2908    // to an integer comparison.
2909    if (CC == ISD::SETOEQ)
2910      CC = ISD::SETEQ;
2911    else if (CC == ISD::SETUNE)
2912      CC = ISD::SETNE;
2913
2914    SDValue ARMcc;
2915    if (LHS.getValueType() == MVT::f32) {
2916      LHS = bitcastf32Toi32(LHS, DAG);
2917      RHS = bitcastf32Toi32(RHS, DAG);
2918      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2919      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2920      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2921                         Chain, Dest, ARMcc, CCR, Cmp);
2922    }
2923
2924    SDValue LHS1, LHS2;
2925    SDValue RHS1, RHS2;
2926    expandf64Toi32(LHS, DAG, LHS1, LHS2);
2927    expandf64Toi32(RHS, DAG, RHS1, RHS2);
2928    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2929    ARMcc = DAG.getConstant(CondCode, MVT::i32);
2930    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2931    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
2932    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
2933  }
2934
2935  return SDValue();
2936}
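
// E.g. with -enable-unsafe-fp-math, for roughly
//   %x = load float* %p
//   br (oeq %x, 0.0), ...
// the f32 load is re-issued as an i32 load and tested with 'cmp rN, #0',
// avoiding the vcmp + vmrs flag transfer.  A bit-pattern test treats -0.0 as
// nonzero, which is why this is gated on unsafe FP math.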
2937
2938SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2939  SDValue Chain = Op.getOperand(0);
2940  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2941  SDValue LHS = Op.getOperand(2);
2942  SDValue RHS = Op.getOperand(3);
2943  SDValue Dest = Op.getOperand(4);
2944  DebugLoc dl = Op.getDebugLoc();
2945
2946  if (LHS.getValueType() == MVT::i32) {
2947    SDValue ARMcc;
2948    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2949    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2950    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2951                       Chain, Dest, ARMcc, CCR, Cmp);
2952  }
2953
2954  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
2955
2956  if (UnsafeFPMath &&
2957      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
2958       CC == ISD::SETNE || CC == ISD::SETUNE)) {
2959    SDValue Result = OptimizeVFPBrcond(Op, DAG);
2960    if (Result.getNode())
2961      return Result;
2962  }
2963
2964  ARMCC::CondCodes CondCode, CondCode2;
2965  FPCCToARMCC(CC, CondCode, CondCode2);
2966
2967  SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2968  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2969  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2970  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2971  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
2972  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2973  if (CondCode2 != ARMCC::AL) {
2974    ARMcc = DAG.getConstant(CondCode2, MVT::i32);
2975    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
2976    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2977  }
2978  return Res;
2979}
2980
2981SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
2982  SDValue Chain = Op.getOperand(0);
2983  SDValue Table = Op.getOperand(1);
2984  SDValue Index = Op.getOperand(2);
2985  DebugLoc dl = Op.getDebugLoc();
2986
2987  EVT PTy = getPointerTy();
2988  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
2989  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
2990  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
2991  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
2992  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
2993  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
2994  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
2995  if (Subtarget->isThumb2()) {
2996    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
2997    // which does another jump to the destination. This also makes it easier
2998    // to translate it to TBB / TBH later.
2999    // FIXME: This might not work if the function is extremely large.
3000    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
3001                       Addr, Op.getOperand(2), JTI, UId);
3002  }
3003  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
3004    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
3005                       MachinePointerInfo::getJumpTable(),
3006                       false, false, 0);
3007    Chain = Addr.getValue(1);
3008    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
3009    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
3010  } else {
3011    Addr = DAG.getLoad(PTy, dl, Chain, Addr,
3012                       MachinePointerInfo::getJumpTable(), false, false, 0);
3013    Chain = Addr.getValue(1);
3014    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
3015  }
3016}
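
// E.g. in PIC mode each jump-table entry holds an offset relative to the
// table itself, so the code above loads the entry and adds the table address
// back in before the ARMISD::BR_JT branch; in static mode the loaded entry
// is already the destination address.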
3017
3018static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
3019  DebugLoc dl = Op.getDebugLoc();
3020  unsigned Opc;
3021
3022  switch (Op.getOpcode()) {
3023  default:
3024    llvm_unreachable("Invalid opcode!");
3025  case ISD::FP_TO_SINT:
3026    Opc = ARMISD::FTOSI;
3027    break;
3028  case ISD::FP_TO_UINT:
3029    Opc = ARMISD::FTOUI;
3030    break;
3031  }
3032  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
3033  return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
3034}
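
// E.g. (i32 fp_to_sint f32:$x) becomes roughly
//   vcvt.s32.f32 s0, s0   ; ARMISD::FTOSI, integer bits left in an S register
//   vmov r0, s0           ; the BITCAST moves them into a GPR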
3035
3036static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
3037  EVT VT = Op.getValueType();
3038  DebugLoc dl = Op.getDebugLoc();
3039
3040  assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
3041         "Invalid type for custom lowering!");
3042  if (VT != MVT::v4f32)
3043    return DAG.UnrollVectorOp(Op.getNode());
3044
3045  unsigned CastOpc;
3046  unsigned Opc;
3047  switch (Op.getOpcode()) {
3048  default:
3049    llvm_unreachable("Invalid opcode!");
3050  case ISD::SINT_TO_FP:
3051    CastOpc = ISD::SIGN_EXTEND;
3052    Opc = ISD::SINT_TO_FP;
3053    break;
3054  case ISD::UINT_TO_FP:
3055    CastOpc = ISD::ZERO_EXTEND;
3056    Opc = ISD::UINT_TO_FP;
3057    break;
3058  }
3059
3060  Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
3061  return DAG.getNode(Opc, dl, VT, Op);
3062}
3063
3064static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
3065  EVT VT = Op.getValueType();
3066  if (VT.isVector())
3067    return LowerVectorINT_TO_FP(Op, DAG);
3068
3069  DebugLoc dl = Op.getDebugLoc();
3070  unsigned Opc;
3071
3072  switch (Op.getOpcode()) {
3073  default:
3074    llvm_unreachable("Invalid opcode!");
3075  case ISD::SINT_TO_FP:
3076    Opc = ARMISD::SITOF;
3077    break;
3078  case ISD::UINT_TO_FP:
3079    Opc = ARMISD::UITOF;
3080    break;
3081  }
3082
3083  Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
3084  return DAG.getNode(Opc, dl, VT, Op);
3085}
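
// The mirror image of LowerFP_TO_INT: e.g. (f32 sint_to_fp i32:$x) is roughly
//   vmov s0, r0           ; the BITCAST moves the GPR bits into an S register
//   vcvt.f32.s32 s0, s0   ; ARMISD::SITOF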
3086
3087SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
3088  // Implement fcopysign with a fabs and a conditional fneg.
3089  SDValue Tmp0 = Op.getOperand(0);
3090  SDValue Tmp1 = Op.getOperand(1);
3091  DebugLoc dl = Op.getDebugLoc();
3092  EVT VT = Op.getValueType();
3093  EVT SrcVT = Tmp1.getValueType();
3094  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
3095    Tmp0.getOpcode() == ARMISD::VMOVDRR;
3096  bool UseNEON = !InGPR && Subtarget->hasNEON();
3097
3098  if (UseNEON) {
3099    // Use VBSL to copy the sign bit.
3100    unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
3101    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
3102                               DAG.getTargetConstant(EncodedVal, MVT::i32));
3103    EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
3104    if (VT == MVT::f64)
3105      Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
3106                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
3107                         DAG.getConstant(32, MVT::i32));
3108    else /*if (VT == MVT::f32)*/
3109      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
3110    if (SrcVT == MVT::f32) {
3111      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
3112      if (VT == MVT::f64)
3113        Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
3114                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
3115                           DAG.getConstant(32, MVT::i32));
3116    } else if (VT == MVT::f32)
3117      Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
3118                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
3119                         DAG.getConstant(32, MVT::i32));
3120    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
3121    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
3122
3123    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
3124                                            MVT::i32);
3125    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
3126    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
3127                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
3128
3129    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
3130                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
3131                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
3132    if (VT == MVT::f32) {
3133      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
3134      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
3135                        DAG.getConstant(0, MVT::i32));
3136    } else {
3137      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
3138    }
3139
3140    return Res;
3141  }
3142
3143  // Bitcast operand 1 to i32.
3144  if (SrcVT == MVT::f64)
3145    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
3146                       &Tmp1, 1).getValue(1);
3147  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
3148
3149  // Or in the signbit with integer operations.
3150  SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
3151  SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
3152  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
3153  if (VT == MVT::f32) {
3154    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
3155                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
3156    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
3157                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
3158  }
3159
3160  // f64: Or the high part with signbit and then combine two parts.
3161  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
3162                     &Tmp0, 1);
3163  SDValue Lo = Tmp0.getValue(0);
3164  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
3165  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
3166  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
3167}
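
// The non-NEON f32 path above is the classic integer copysign idiom, roughly
//   result = bitcast ((bits(Tmp0) & 0x7fffffff) | (bits(Tmp1) & 0x80000000))
// i.e. keep the magnitude of operand 0 and the sign bit of operand 1; the
// f64 path applies the same two masks to the high word only.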
3168
3169SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
3170  MachineFunction &MF = DAG.getMachineFunction();
3171  MachineFrameInfo *MFI = MF.getFrameInfo();
3172  MFI->setReturnAddressIsTaken(true);
3173
3174  EVT VT = Op.getValueType();
3175  DebugLoc dl = Op.getDebugLoc();
3176  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3177  if (Depth) {
3178    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
3179    SDValue Offset = DAG.getConstant(4, MVT::i32);
3180    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
3181                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
3182                       MachinePointerInfo(), false, false, 0);
3183  }
3184
3185  // Return LR, which contains the return address. Mark it an implicit live-in.
3186  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
3187  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
3188}
3189
3190SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
3191  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3192  MFI->setFrameAddressIsTaken(true);
3193
3194  EVT VT = Op.getValueType();
3195  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
3196  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3197  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
3198    ? ARM::R7 : ARM::R11;
3199  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
3200  while (Depth--)
3201    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
3202                            MachinePointerInfo(),
3203                            false, false, 0);
3204  return FrameAddr;
3205}
3206
3207/// ExpandBITCAST - If the target supports VFP, this function is called to
3208/// expand a bit convert where either the source or destination type is i64 to
3209/// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
3210/// operand type is illegal (e.g., v2f32 for a target that doesn't support
3211/// vectors), since the legalizer won't know what to do with that.
3212static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
3213  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3214  DebugLoc dl = N->getDebugLoc();
3215  SDValue Op = N->getOperand(0);
3216
3217  // This function is only supposed to be called for i64 types, either as the
3218  // source or destination of the bit convert.
3219  EVT SrcVT = Op.getValueType();
3220  EVT DstVT = N->getValueType(0);
3221  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
3222         "ExpandBITCAST called for non-i64 type");
3223
3224  // Turn i64->f64 into VMOVDRR.
3225  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
3226    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
3227                             DAG.getConstant(0, MVT::i32));
3228    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
3229                             DAG.getConstant(1, MVT::i32));
3230    return DAG.getNode(ISD::BITCAST, dl, DstVT,
3231                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
3232  }
3233
3234  // Turn f64->i64 into VMOVRRD.
3235  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
3236    SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
3237                              DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
3238    // Merge the pieces into a single i64 value.
3239    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
3240  }
3241
3242  return SDValue();
3243}
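
// E.g. (f64 bitcast i64:$x) selects to 'vmov d0, r0, r1' (VMOVDRR) and
// (i64 bitcast f64:$d) to 'vmov r0, r1, d0' (VMOVRRD), moving both 32-bit
// halves in one instruction.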
3244
3245/// getZeroVector - Returns a vector of specified type with all zero elements.
3246/// Zero vectors are used to represent vector negation and in those cases
3247/// will be implemented with the NEON VNEG instruction.  However, VNEG does
3248/// not support i64 elements, so sometimes the zero vectors will need to be
3249/// explicitly constructed.  Regardless, use a canonical VMOV to create the
3250/// zero vector.
3251static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
3252  assert(VT.isVector() && "Expected a vector type");
3253  // The canonical modified immediate encoding of a zero vector is....0!
3254  SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
3255  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
3256  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
3257  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3258}
3259
3260/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
3261/// i32 values and take a 2 x i32 value to shift plus a shift amount.
3262SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
3263                                                SelectionDAG &DAG) const {
3264  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
3265  EVT VT = Op.getValueType();
3266  unsigned VTBits = VT.getSizeInBits();
3267  DebugLoc dl = Op.getDebugLoc();
3268  SDValue ShOpLo = Op.getOperand(0);
3269  SDValue ShOpHi = Op.getOperand(1);
3270  SDValue ShAmt  = Op.getOperand(2);
3271  SDValue ARMcc;
3272  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
3273
3274  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
3275
3276  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
3277                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
3278  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
3279  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
3280                                   DAG.getConstant(VTBits, MVT::i32));
3281  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
3282  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3283  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
3284
3285  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3286  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
3287                          ARMcc, DAG, dl);
3288  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
3289  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
3290                           CCR, Cmp);
3291
3292  SDValue Ops[2] = { Lo, Hi };
3293  return DAG.getMergeValues(Ops, 2, dl);
3294}
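
// In C-like pseudocode (for 0 <= amt < 64) the lowering above computes
//   lo = (amt >= 32) ? hi >> (amt - 32)                  // SRA or SRL
//                    : (lo >>u amt) | (hi << (32 - amt));
//   hi = hi >> amt;                                      // SRA or SRL
// where the amt >= 32 case is picked by the CMOV on (amt - 32) >= 0, and the
// high word relies on ARM register shifts by 32..63 yielding 0 / sign-fill.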
3295
3296/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
3297/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
3298SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
3299                                               SelectionDAG &DAG) const {
3300  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
3301  EVT VT = Op.getValueType();
3302  unsigned VTBits = VT.getSizeInBits();
3303  DebugLoc dl = Op.getDebugLoc();
3304  SDValue ShOpLo = Op.getOperand(0);
3305  SDValue ShOpHi = Op.getOperand(1);
3306  SDValue ShAmt  = Op.getOperand(2);
3307  SDValue ARMcc;
3308
3309  assert(Op.getOpcode() == ISD::SHL_PARTS);
3310  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
3311                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
3312  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
3313  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
3314                                   DAG.getConstant(VTBits, MVT::i32));
3315  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
3316  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
3317
3318  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3319  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3320  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
3321                          ARMcc, DAG, dl);
3322  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
3323  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
3324                           CCR, Cmp);
3325
3326  SDValue Ops[2] = { Lo, Hi };
3327  return DAG.getMergeValues(Ops, 2, dl);
3328}
3329
3330SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
3331                                            SelectionDAG &DAG) const {
3332  // The rounding mode is in bits 23:22 of the FPSCR.
3333  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
3334  // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3
3335  // so that the shift + and get folded into a bitfield extract.
3336  DebugLoc dl = Op.getDebugLoc();
3337  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
3338                              DAG.getConstant(Intrinsic::arm_get_fpscr,
3339                                              MVT::i32));
3340  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
3341                                  DAG.getConstant(1U << 22, MVT::i32));
3342  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
3343                              DAG.getConstant(22, MVT::i32));
3344  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
3345                     DAG.getConstant(3, MVT::i32));
3346}
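
// Worked example: round-towards-plus-infinity is RMode 0b01, so FPSCR
// contributes 1 << 22; adding 1 << 22 gives 2 << 22, and the shift + mask
// produce 2, the FLT_ROUNDS value for upward rounding.  The mask also
// handles the wraparound: RMode 3 (toward zero) maps to (3 + 1) & 3 == 0.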
3347
3348static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
3349                         const ARMSubtarget *ST) {
3350  EVT VT = N->getValueType(0);
3351  DebugLoc dl = N->getDebugLoc();
3352
3353  if (!ST->hasV6T2Ops())
3354    return SDValue();
3355
3356  SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
3357  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
3358}
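
// E.g. on v6t2 and later an i32 cttz becomes
//   rbit r0, r0   ; reverse the bit order
//   clz  r0, r0   ; cttz(x) == ctlz(reverse_bits(x))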
3359
3360static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
3361                          const ARMSubtarget *ST) {
3362  EVT VT = N->getValueType(0);
3363  DebugLoc dl = N->getDebugLoc();
3364
3365  if (!VT.isVector())
3366    return SDValue();
3367
3368  // Lower vector shifts on NEON to use VSHL.
3369  assert(ST->hasNEON() && "unexpected vector shift");
3370
3371  // Left shifts translate directly to the vshiftu intrinsic.
3372  if (N->getOpcode() == ISD::SHL)
3373    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3374                       DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
3375                       N->getOperand(0), N->getOperand(1));
3376
3377  assert((N->getOpcode() == ISD::SRA ||
3378          N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
3379
3380  // NEON uses the same intrinsics for both left and right shifts.  For
3381  // right shifts, the shift amounts are negative, so negate the vector of
3382  // shift amounts.
3383  EVT ShiftVT = N->getOperand(1).getValueType();
3384  SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
3385                                     getZeroVector(ShiftVT, DAG, dl),
3386                                     N->getOperand(1));
3387  Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
3388                             Intrinsic::arm_neon_vshifts :
3389                             Intrinsic::arm_neon_vshiftu);
3390  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3391                     DAG.getConstant(vshiftInt, MVT::i32),
3392                     N->getOperand(0), NegatedCount);
3393}
3394
3395static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
3396                                const ARMSubtarget *ST) {
3397  EVT VT = N->getValueType(0);
3398  DebugLoc dl = N->getDebugLoc();
3399
3400  // We can get here for a node like i32 = ISD::SHL i32, i64
3401  if (VT != MVT::i64)
3402    return SDValue();
3403
3404  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
3405         "Unknown shift to lower!");
3406
3407  // We only lower SRA and SRL by 1 here; all others use generic lowering.
3408  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
3409      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
3410    return SDValue();
3411
3412  // If we are in thumb mode, we don't have RRX.
3413  if (ST->isThumb1Only()) return SDValue();
3414
3415  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
3416  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
3417                           DAG.getConstant(0, MVT::i32));
3418  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
3419                           DAG.getConstant(1, MVT::i32));
3420
3421  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
3422  // captures the result into a carry flag.
3423  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
3424  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1);
3425
3426  // The low part is an ARMISD::RRX operand, which shifts the carry in.
3427  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
3428
3429  // Merge the pieces into a single i64 value.
3430  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
3431}
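
// E.g. (i64 srl x, 1) with x in r1:r0 becomes roughly
//   lsrs r1, r1, #1   ; SRL_FLAG: bit 0 of the high word falls into carry
//   rrx  r0, r0       ; RRX: the carry rotates into bit 31 of the low word
// (asrs/rrx for the SRA case).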
3432
3433static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
3434  SDValue TmpOp0, TmpOp1;
3435  bool Invert = false;
3436  bool Swap = false;
3437  unsigned Opc = 0;
3438
3439  SDValue Op0 = Op.getOperand(0);
3440  SDValue Op1 = Op.getOperand(1);
3441  SDValue CC = Op.getOperand(2);
3442  EVT VT = Op.getValueType();
3443  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
3444  DebugLoc dl = Op.getDebugLoc();
3445
3446  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
3447    switch (SetCCOpcode) {
3448    default: llvm_unreachable("Illegal FP comparison"); break;
3449    case ISD::SETUNE:
3450    case ISD::SETNE:  Invert = true; // Fallthrough
3451    case ISD::SETOEQ:
3452    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
3453    case ISD::SETOLT:
3454    case ISD::SETLT: Swap = true; // Fallthrough
3455    case ISD::SETOGT:
3456    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
3457    case ISD::SETOLE:
3458    case ISD::SETLE:  Swap = true; // Fallthrough
3459    case ISD::SETOGE:
3460    case ISD::SETGE: Opc = ARMISD::VCGE; break;
3461    case ISD::SETUGE: Swap = true; // Fallthrough
3462    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
3463    case ISD::SETUGT: Swap = true; // Fallthrough
3464    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
3465    case ISD::SETUEQ: Invert = true; // Fallthrough
3466    case ISD::SETONE:
3467      // Expand this to (OLT | OGT).
3468      TmpOp0 = Op0;
3469      TmpOp1 = Op1;
3470      Opc = ISD::OR;
3471      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3472      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
3473      break;
3474    case ISD::SETUO: Invert = true; // Fallthrough
3475    case ISD::SETO:
3476      // Expand this to (OLT | OGE).
3477      TmpOp0 = Op0;
3478      TmpOp1 = Op1;
3479      Opc = ISD::OR;
3480      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3481      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
3482      break;
3483    }
3484  } else {
3485    // Integer comparisons.
3486    switch (SetCCOpcode) {
3487    default: llvm_unreachable("Illegal integer comparison"); break;
3488    case ISD::SETNE:  Invert = true;
3489    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
3490    case ISD::SETLT:  Swap = true;
3491    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
3492    case ISD::SETLE:  Swap = true;
3493    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
3494    case ISD::SETULT: Swap = true;
3495    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
3496    case ISD::SETULE: Swap = true;
3497    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
3498    }
3499
3500    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
3501    if (Opc == ARMISD::VCEQ) {
3502
3503      SDValue AndOp;
3504      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
3505        AndOp = Op0;
3506      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
3507        AndOp = Op1;
3508
3509      // Ignore bitconvert.
3510      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
3511        AndOp = AndOp.getOperand(0);
3512
3513      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
3514        Opc = ARMISD::VTST;
3515        Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
3516        Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
3517        Invert = !Invert;
3518      }
3519    }
3520  }
3521
3522  if (Swap)
3523    std::swap(Op0, Op1);
3524
3525  // If one of the operands is a constant vector zero, attempt to fold the
3526  // comparison to a specialized compare-against-zero form.
3527  SDValue SingleOp;
3528  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
3529    SingleOp = Op0;
3530  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
3531    if (Opc == ARMISD::VCGE)
3532      Opc = ARMISD::VCLEZ;
3533    else if (Opc == ARMISD::VCGT)
3534      Opc = ARMISD::VCLTZ;
3535    SingleOp = Op1;
3536  }
3537
3538  SDValue Result;
3539  if (SingleOp.getNode()) {
3540    switch (Opc) {
3541    case ARMISD::VCEQ:
3542      Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
3543    case ARMISD::VCGE:
3544      Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
3545    case ARMISD::VCLEZ:
3546      Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
3547    case ARMISD::VCGT:
3548      Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
3549    case ARMISD::VCLTZ:
3550      Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
3551    default:
3552      Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3553    }
3554  } else {
3555    Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3556  }
3557
3558  if (Invert)
3559    Result = DAG.getNOT(dl, Result, VT);
3560
3561  return Result;
3562}
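
// E.g. (v4i32 setne a, b) has no direct NEON instruction; it is emitted as
// VCEQ followed by the Invert step, i.e. a VMVN of the compare result.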
3563
3564/// isNEONModifiedImm - Check if the specified splat value corresponds to a
3565/// valid vector constant for a NEON instruction with a "modified immediate"
3566/// operand (e.g., VMOV).  If so, return the encoded value.
3567static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
3568                                 unsigned SplatBitSize, SelectionDAG &DAG,
3569                                 EVT &VT, bool is128Bits, NEONModImmType type) {
3570  unsigned OpCmode, Imm;
3571
3572  // SplatBitSize is set to the smallest size that splats the vector, so a
3573  // zero vector will always have SplatBitSize == 8.  However, NEON modified
3574  // immediate instructions other than VMOV do not support the 8-bit encoding
3575  // of a zero vector, and the default encoding of zero is supposed to be the
3576  // 32-bit version.
3577  if (SplatBits == 0)
3578    SplatBitSize = 32;
3579
3580  switch (SplatBitSize) {
3581  case 8:
3582    if (type != VMOVModImm)
3583      return SDValue();
3584    // Any 1-byte value is OK.  Op=0, Cmode=1110.
3585    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
3586    OpCmode = 0xe;
3587    Imm = SplatBits;
3588    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3589    break;
3590
3591  case 16:
3592    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
3593    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3594    if ((SplatBits & ~0xff) == 0) {
3595      // Value = 0x00nn: Op=x, Cmode=100x.
3596      OpCmode = 0x8;
3597      Imm = SplatBits;
3598      break;
3599    }
3600    if ((SplatBits & ~0xff00) == 0) {
3601      // Value = 0xnn00: Op=x, Cmode=101x.
3602      OpCmode = 0xa;
3603      Imm = SplatBits >> 8;
3604      break;
3605    }
3606    return SDValue();
3607
3608  case 32:
3609    // NEON's 32-bit VMOV supports splat values where:
3610    // * only one byte is nonzero, or
3611    // * the least significant byte is 0xff and the second byte is nonzero, or
3612    // * the least significant 2 bytes are 0xff and the third is nonzero.
3613    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3614    if ((SplatBits & ~0xff) == 0) {
3615      // Value = 0x000000nn: Op=x, Cmode=000x.
3616      OpCmode = 0;
3617      Imm = SplatBits;
3618      break;
3619    }
3620    if ((SplatBits & ~0xff00) == 0) {
3621      // Value = 0x0000nn00: Op=x, Cmode=001x.
3622      OpCmode = 0x2;
3623      Imm = SplatBits >> 8;
3624      break;
3625    }
3626    if ((SplatBits & ~0xff0000) == 0) {
3627      // Value = 0x00nn0000: Op=x, Cmode=010x.
3628      OpCmode = 0x4;
3629      Imm = SplatBits >> 16;
3630      break;
3631    }
3632    if ((SplatBits & ~0xff000000) == 0) {
3633      // Value = 0xnn000000: Op=x, Cmode=011x.
3634      OpCmode = 0x6;
3635      Imm = SplatBits >> 24;
3636      break;
3637    }
3638
3639    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
3640    if (type == OtherModImm) return SDValue();
3641
3642    if ((SplatBits & ~0xffff) == 0 &&
3643        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
3644      // Value = 0x0000nnff: Op=x, Cmode=1100.
3645      OpCmode = 0xc;
3646      Imm = SplatBits >> 8;
3647      SplatBits |= 0xff;
3648      break;
3649    }
3650
3651    if ((SplatBits & ~0xffffff) == 0 &&
3652        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3653      // Value = 0x00nnffff: Op=x, Cmode=1101.
3654      OpCmode = 0xd;
3655      Imm = SplatBits >> 16;
3656      SplatBits |= 0xffff;
3657      break;
3658    }
3659
3660    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
3661    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
3662    // VMOV.I32.  A (very) minor optimization would be to replicate the value
3663    // and fall through here to test for a valid 64-bit splat.  But, then the
3664    // caller would also need to check and handle the change in size.
3665    return SDValue();
3666
3667  case 64: {
3668    if (type != VMOVModImm)
3669      return SDValue();
3670    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
3671    uint64_t BitMask = 0xff;
3672    uint64_t Val = 0;
3673    unsigned ImmMask = 1;
3674    Imm = 0;
3675    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3676      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3677        Val |= BitMask;
3678        Imm |= ImmMask;
3679      } else if ((SplatBits & BitMask) != 0) {
3680        return SDValue();
3681      }
3682      BitMask <<= 8;
3683      ImmMask <<= 1;
3684    }
3685    // Op=1, Cmode=1110.
3686    OpCmode = 0x1e;
3687    SplatBits = Val;
3688    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3689    break;
3690  }
3691
3692  default:
3693    llvm_unreachable("unexpected size for isNEONModifiedImm");
3694    return SDValue();
3695  }
3696
3697  unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
3698  return DAG.getTargetConstant(EncodedVal, MVT::i32);
3699}
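
// Worked example: a v4i32 splat of 0x0000ab00 reaches the 32-bit case and
// matches "Value = 0x0000nn00", so OpCmode == 0x2 and Imm == 0xab, which
// encodes roughly as 'vmov.i32 q0, #0x0000ab00'.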
3700
3701static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
3702                       bool &ReverseVEXT, unsigned &Imm) {
3703  unsigned NumElts = VT.getVectorNumElements();
3704  ReverseVEXT = false;
3705
3706  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
3707  if (M[0] < 0)
3708    return false;
3709
3710  Imm = M[0];
3711
3712  // If this is a VEXT shuffle, the immediate value is the index of the first
3713  // element.  The other shuffle indices must be the successive elements after
3714  // the first one.
3715  unsigned ExpectedElt = Imm;
3716  for (unsigned i = 1; i < NumElts; ++i) {
3717    // Increment the expected index.  If it wraps around, it may still be
3718    // a VEXT but the source vectors must be swapped.
3719    ExpectedElt += 1;
3720    if (ExpectedElt == NumElts * 2) {
3721      ExpectedElt = 0;
3722      ReverseVEXT = true;
3723    }
3724
3725    if (M[i] < 0) continue; // ignore UNDEF indices
3726    if (ExpectedElt != static_cast<unsigned>(M[i]))
3727      return false;
3728  }
3729
3730  // Adjust the index value if the source operands will be swapped.
3731  if (ReverseVEXT)
3732    Imm -= NumElts;
3733
3734  return true;
3735}
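
// E.g. on v8i8 the mask <5,6,7,8,9,10,11,12> is a VEXT with Imm == 5: the
// result is the concatenation of the two sources re-windowed to start at
// byte 5 of the first one.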
3736
3737/// isVREVMask - Check if a vector shuffle corresponds to a VREV
3738/// instruction with the specified blocksize.  (The order of the elements
3739/// within each block of the vector is reversed.)
3740static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
3741                       unsigned BlockSize) {
3742  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
3743         "Only possible block sizes for VREV are: 16, 32, 64");
3744
3745  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3746  if (EltSz == 64)
3747    return false;
3748
3749  unsigned NumElts = VT.getVectorNumElements();
3750  unsigned BlockElts = M[0] + 1;
3751  // If the first shuffle index is UNDEF, be optimistic.
3752  if (M[0] < 0)
3753    BlockElts = BlockSize / EltSz;
3754
3755  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
3756    return false;
3757
3758  for (unsigned i = 0; i < NumElts; ++i) {
3759    if (M[i] < 0) continue; // ignore UNDEF indices
3760    if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
3761      return false;
3762  }
3763
3764  return true;
3765}
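
// E.g. for v8i8 with BlockSize == 32 the mask <3,2,1,0,7,6,5,4> reverses the
// bytes within each 32-bit block and corresponds to VREV32.8.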
3766
3767static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) {
3768  // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
3769  // range, then 0 is placed into the resulting vector. So pretty much any mask
3770  // of 8 elements can work here.
3771  return VT == MVT::v8i8 && M.size() == 8;
3772}
3773
3774static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
3775                       unsigned &WhichResult) {
3776  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3777  if (EltSz == 64)
3778    return false;
3779
3780  unsigned NumElts = VT.getVectorNumElements();
3781  WhichResult = (M[0] == 0 ? 0 : 1);
3782  for (unsigned i = 0; i < NumElts; i += 2) {
3783    if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
3784        (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
3785      return false;
3786  }
3787  return true;
3788}
3789
3790/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
3791/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
3792/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
3793static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
3794                                unsigned &WhichResult) {
3795  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3796  if (EltSz == 64)
3797    return false;
3798
3799  unsigned NumElts = VT.getVectorNumElements();
3800  WhichResult = (M[0] == 0 ? 0 : 1);
3801  for (unsigned i = 0; i < NumElts; i += 2) {
3802    if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
3803        (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
3804      return false;
3805  }
3806  return true;
3807}
3808
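/// isVUZPMask - Check if a vector shuffle corresponds to one result of a
/// VUZP instruction, which unzips the even or odd lanes of the concatenated
/// sources.  For a 4-element vector the two results use the masks
/// <0, 2, 4, 6> and <1, 3, 5, 7>.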
3809static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
3810                       unsigned &WhichResult) {
3811  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3812  if (EltSz == 64)
3813    return false;
3814
3815  unsigned NumElts = VT.getVectorNumElements();
3816  WhichResult = (M[0] == 0 ? 0 : 1);
3817  for (unsigned i = 0; i != NumElts; ++i) {
3818    if (M[i] < 0) continue; // ignore UNDEF indices
3819    if ((unsigned) M[i] != 2 * i + WhichResult)
3820      return false;
3821  }
3822
3823  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3824  if (VT.is64BitVector() && EltSz == 32)
3825    return false;
3826
3827  return true;
3828}
3829
3830/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
3831/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
3832/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
3833static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
3834                                unsigned &WhichResult) {
3835  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3836  if (EltSz == 64)
3837    return false;
3838
3839  unsigned Half = VT.getVectorNumElements() / 2;
3840  WhichResult = (M[0] == 0 ? 0 : 1);
3841  for (unsigned j = 0; j != 2; ++j) {
3842    unsigned Idx = WhichResult;
3843    for (unsigned i = 0; i != Half; ++i) {
3844      int MIdx = M[i + j * Half];
3845      if (MIdx >= 0 && (unsigned) MIdx != Idx)
3846        return false;
3847      Idx += 2;
3848    }
3849  }
3850
3851  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3852  if (VT.is64BitVector() && EltSz == 32)
3853    return false;
3854
3855  return true;
3856}
3857
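/// isVZIPMask - Check if a vector shuffle corresponds to one result of a
/// VZIP instruction, which interleaves the low or high halves of the two
/// sources.  For a 4-element vector the two results use the masks
/// <0, 4, 1, 5> and <2, 6, 3, 7>.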
3858static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
3859                       unsigned &WhichResult) {
3860  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3861  if (EltSz == 64)
3862    return false;
3863
3864  unsigned NumElts = VT.getVectorNumElements();
3865  WhichResult = (M[0] == 0 ? 0 : 1);
3866  unsigned Idx = WhichResult * NumElts / 2;
3867  for (unsigned i = 0; i != NumElts; i += 2) {
3868    if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
3869        (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
3870      return false;
3871    Idx += 1;
3872  }
3873
3874  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3875  if (VT.is64BitVector() && EltSz == 32)
3876    return false;
3877
3878  return true;
3879}
3880
3881/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
3882/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
3883/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
3884static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
3885                                unsigned &WhichResult) {
3886  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3887  if (EltSz == 64)
3888    return false;
3889
3890  unsigned NumElts = VT.getVectorNumElements();
3891  WhichResult = (M[0] == 0 ? 0 : 1);
3892  unsigned Idx = WhichResult * NumElts / 2;
3893  for (unsigned i = 0; i != NumElts; i += 2) {
3894    if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
3895        (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
3896      return false;
3897    Idx += 1;
3898  }
3899
3900  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
3901  if (VT.is64BitVector() && EltSz == 32)
3902    return false;
3903
3904  return true;
3905}
3906
3907// If N is an integer constant that can be moved into a register in one
3908// instruction, return an SDValue of such a constant (will become a MOV
3909// instruction).  Otherwise return null.
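// (For Thumb1 that means a value, or the complement of one, that fits in
// 8 bits; otherwise it means a valid ARM so_imm -- an 8-bit value rotated
// right by an even amount -- or the complement of one, which MVN covers.)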
3910static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
3911                                     const ARMSubtarget *ST, DebugLoc dl) {
3912  uint64_t Val;
3913  if (!isa<ConstantSDNode>(N))
3914    return SDValue();
3915  Val = cast<ConstantSDNode>(N)->getZExtValue();
3916
3917  if (ST->isThumb1Only()) {
3918    if (Val <= 255 || ~Val <= 255)
3919      return DAG.getConstant(Val, MVT::i32);
3920  } else {
3921    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
3922      return DAG.getConstant(Val, MVT::i32);
3923  }
3924  return SDValue();
3925}
3926
3927// If this is a case we can't handle, return null and let the default
3928// expansion code take care of it.
3929SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
3930                                             const ARMSubtarget *ST) const {
3931  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
3932  DebugLoc dl = Op.getDebugLoc();
3933  EVT VT = Op.getValueType();
3934
3935  APInt SplatBits, SplatUndef;
3936  unsigned SplatBitSize;
3937  bool HasAnyUndefs;
3938  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
3939    if (SplatBitSize <= 64) {
3940      // Check if an immediate VMOV works.
3941      EVT VmovVT;
3942      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
3943                                      SplatUndef.getZExtValue(), SplatBitSize,
3944                                      DAG, VmovVT, VT.is128BitVector(),
3945                                      VMOVModImm);
3946      if (Val.getNode()) {
3947        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
3948        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3949      }
3950
3951      // Try an immediate VMVN.
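      // (For example, a v4i32 splat of 0xFFFFFF00 has no VMOV encoding, but
      // its complement 0x000000FF does, so it can be materialized with
      // "vmvn.i32 q0, #0xff".)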
3952      uint64_t NegatedImm = (~SplatBits).getZExtValue();
3953      Val = isNEONModifiedImm(NegatedImm,
3954                                      SplatUndef.getZExtValue(), SplatBitSize,
3955                                      DAG, VmovVT, VT.is128BitVector(),
3956                                      VMVNModImm);
3957      if (Val.getNode()) {
3958        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
3959        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3960      }
3961    }
3962  }
3963
3964  // Scan through the operands to see if only one value is used.
3965  unsigned NumElts = VT.getVectorNumElements();
3966  bool isOnlyLowElement = true;
3967  bool usesOnlyOneValue = true;
3968  bool isConstant = true;
3969  SDValue Value;
3970  for (unsigned i = 0; i < NumElts; ++i) {
3971    SDValue V = Op.getOperand(i);
3972    if (V.getOpcode() == ISD::UNDEF)
3973      continue;
3974    if (i > 0)
3975      isOnlyLowElement = false;
3976    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
3977      isConstant = false;
3978
3979    if (!Value.getNode())
3980      Value = V;
3981    else if (V != Value)
3982      usesOnlyOneValue = false;
3983  }
3984
3985  if (!Value.getNode())
3986    return DAG.getUNDEF(VT);
3987
3988  if (isOnlyLowElement)
3989    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
3990
3991  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
3992
3993  // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
3994  // i32 and try again.
3995  if (usesOnlyOneValue && EltSize <= 32) {
3996    if (!isConstant)
3997      return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
3998    if (VT.getVectorElementType().isFloatingPoint()) {
3999      SmallVector<SDValue, 8> Ops;
4000      for (unsigned i = 0; i < NumElts; ++i)
4001        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
4002                                  Op.getOperand(i)));
4003      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
4004      SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
4005      Val = LowerBUILD_VECTOR(Val, DAG, ST);
4006      if (Val.getNode())
4007        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4008    }
4009    SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
4010    if (Val.getNode())
4011      return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
4012  }
4013
4014  // If all elements are constants and the case above didn't get hit, fall back
4015  // to the default expansion, which will generate a load from the constant
4016  // pool.
4017  if (isConstant)
4018    return SDValue();
4019
4020  // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
4021  if (NumElts >= 4) {
4022    SDValue shuffle = ReconstructShuffle(Op, DAG);
4023    if (shuffle != SDValue())
4024      return shuffle;
4025  }
4026
4027  // Vectors with 32- or 64-bit elements can be built by directly assigning
4028  // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
4029  // will be legalized.
4030  if (EltSize >= 32) {
4031    // Do the expansion with floating-point types, since that is what the VFP
4032    // registers are defined to use, and since i64 is not legal.
4033    EVT EltVT = EVT::getFloatingPointVT(EltSize);
4034    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
4035    SmallVector<SDValue, 8> Ops;
4036    for (unsigned i = 0; i < NumElts; ++i)
4037      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
4038    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
4039    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4040  }
4041
4042  return SDValue();
4043}
4044
4045// Gather data to see if the operation can be modelled as a
4046// shuffle in combination with VEXTs.
4047SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
4048                                              SelectionDAG &DAG) const {
4049  DebugLoc dl = Op.getDebugLoc();
4050  EVT VT = Op.getValueType();
4051  unsigned NumElts = VT.getVectorNumElements();
4052
4053  SmallVector<SDValue, 2> SourceVecs;
4054  SmallVector<unsigned, 2> MinElts;
4055  SmallVector<unsigned, 2> MaxElts;
4056
4057  for (unsigned i = 0; i < NumElts; ++i) {
4058    SDValue V = Op.getOperand(i);
4059    if (V.getOpcode() == ISD::UNDEF)
4060      continue;
4061    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
4062      // A shuffle can only come from building a vector from various
4063      // elements of other vectors.
4064      return SDValue();
4065    } else if (V.getOperand(0).getValueType().getVectorElementType() !=
4066               VT.getVectorElementType()) {
4067      // This code doesn't know how to handle shuffles where the vector
4068      // element types do not match (this happens because type legalization
4069      // promotes the return type of EXTRACT_VECTOR_ELT).
4070      // FIXME: It might be appropriate to extend this code to handle
4071      // mismatched types.
4072      return SDValue();
4073    }
4074
4075    // Record this extraction against the appropriate vector if possible...
4076    SDValue SourceVec = V.getOperand(0);
4077    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
4078    bool FoundSource = false;
4079    for (unsigned j = 0; j < SourceVecs.size(); ++j) {
4080      if (SourceVecs[j] == SourceVec) {
4081        if (MinElts[j] > EltNo)
4082          MinElts[j] = EltNo;
4083        if (MaxElts[j] < EltNo)
4084          MaxElts[j] = EltNo;
4085        FoundSource = true;
4086        break;
4087      }
4088    }
4089
4090    // Or record a new source if not...
4091    if (!FoundSource) {
4092      SourceVecs.push_back(SourceVec);
4093      MinElts.push_back(EltNo);
4094      MaxElts.push_back(EltNo);
4095    }
4096  }
4097
4098  // Currently we only do something sane when at most two source vectors
4099  // are involved.
4100  if (SourceVecs.size() > 2)
4101    return SDValue();
4102
4103  SDValue ShuffleSrcs[2] = { DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
4104  int VEXTOffsets[2] = {0, 0};
4105
4106  // This loop extracts the usage patterns of the source vectors
4107  // and prepares appropriate SDValues for a shuffle if possible.
4108  for (unsigned i = 0; i < SourceVecs.size(); ++i) {
4109    if (SourceVecs[i].getValueType() == VT) {
4110      // No VEXT necessary
4111      ShuffleSrcs[i] = SourceVecs[i];
4112      VEXTOffsets[i] = 0;
4113      continue;
4114    } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
4115      // It probably isn't worth padding out a smaller vector just to
4116      // break it down again in a shuffle.
4117      return SDValue();
4118    }
4119
4120    // Since only 64-bit and 128-bit vectors are legal on ARM and
4121    // we've eliminated the other cases...
4122    assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
4123           "unexpected vector sizes in ReconstructShuffle");
4124
4125    if (MaxElts[i] - MinElts[i] >= NumElts) {
4126      // Span is too large for a VEXT to cope with.
4127      return SDValue();
4128    }
4129
4130    if (MinElts[i] >= NumElts) {
4131      // The extraction can just take the second half
4132      VEXTOffsets[i] = NumElts;
4133      ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4134                                   SourceVecs[i],
4135                                   DAG.getIntPtrConstant(NumElts));
4136    } else if (MaxElts[i] < NumElts) {
4137      // The extraction can just take the first half
4138      VEXTOffsets[i] = 0;
4139      ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4140                                   SourceVecs[i],
4141                                   DAG.getIntPtrConstant(0));
4142    } else {
4143      // An actual VEXT is needed
4144      VEXTOffsets[i] = MinElts[i];
4145      SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4146                                     SourceVecs[i],
4147                                     DAG.getIntPtrConstant(0));
4148      SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4149                                     SourceVecs[i],
4150                                     DAG.getIntPtrConstant(NumElts));
4151      ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
4152                                   DAG.getConstant(VEXTOffsets[i], MVT::i32));
4153    }
4154  }
4155
4156  SmallVector<int, 8> Mask;
4157
4158  for (unsigned i = 0; i < NumElts; ++i) {
4159    SDValue Entry = Op.getOperand(i);
4160    if (Entry.getOpcode() == ISD::UNDEF) {
4161      Mask.push_back(-1);
4162      continue;
4163    }
4164
4165    SDValue ExtractVec = Entry.getOperand(0);
4166    int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
4167                                          .getOperand(1))->getSExtValue();
4168    if (ExtractVec == SourceVecs[0]) {
4169      Mask.push_back(ExtractElt - VEXTOffsets[0]);
4170    } else {
4171      Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
4172    }
4173  }
4174
4175  // Final check before we try to produce nonsense...
4176  if (isShuffleMaskLegal(Mask, VT))
4177    return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
4178                                &Mask[0]);
4179
4180  return SDValue();
4181}
4182
4183/// isShuffleMaskLegal - Targets can use this to indicate that they only
4184/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
4185/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
4186/// are assumed to be legal.
4187bool
4188ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
4189                                      EVT VT) const {
4190  if (VT.getVectorNumElements() == 4 &&
4191      (VT.is128BitVector() || VT.is64BitVector())) {
4192    unsigned PFIndexes[4];
4193    for (unsigned i = 0; i != 4; ++i) {
4194      if (M[i] < 0)
4195        PFIndexes[i] = 8;
4196      else
4197        PFIndexes[i] = M[i];
4198    }
4199
4200    // Compute the index in the perfect shuffle table.
4201    unsigned PFTableIndex =
4202      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
4203    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
4204    unsigned Cost = (PFEntry >> 30);
4205
4206    if (Cost <= 4)
4207      return true;
4208  }
4209
4210  bool ReverseVEXT;
4211  unsigned Imm, WhichResult;
4212
4213  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4214  return (EltSize >= 32 ||
4215          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
4216          isVREVMask(M, VT, 64) ||
4217          isVREVMask(M, VT, 32) ||
4218          isVREVMask(M, VT, 16) ||
4219          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
4220          isVTBLMask(M, VT) ||
4221          isVTRNMask(M, VT, WhichResult) ||
4222          isVUZPMask(M, VT, WhichResult) ||
4223          isVZIPMask(M, VT, WhichResult) ||
4224          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
4225          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
4226          isVZIP_v_undef_Mask(M, VT, WhichResult));
4227}
4228
4229/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
4230/// the specified operations to build the shuffle.
4231static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
4232                                      SDValue RHS, SelectionDAG &DAG,
4233                                      DebugLoc dl) {
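  // PFEntry packs the cost in bits 31-30, the opcode in bits 29-26, and two
  // 13-bit operand IDs.  Each ID encodes a 4-element mask as a base-9 number:
  // digit 8 means "undef" and digits 0-7 select an element of the two
  // concatenated inputs, so ((0*9+1)*9+2)*9+3 is the identity mask <0,1,2,3>.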
4234  unsigned OpNum = (PFEntry >> 26) & 0x0F;
4235  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
4236  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
4237
4238  enum {
4239    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
4240    OP_VREV,
4241    OP_VDUP0,
4242    OP_VDUP1,
4243    OP_VDUP2,
4244    OP_VDUP3,
4245    OP_VEXT1,
4246    OP_VEXT2,
4247    OP_VEXT3,
4248    OP_VUZPL, // VUZP, left result
4249    OP_VUZPR, // VUZP, right result
4250    OP_VZIPL, // VZIP, left result
4251    OP_VZIPR, // VZIP, right result
4252    OP_VTRNL, // VTRN, left result
4253    OP_VTRNR  // VTRN, right result
4254  };
4255
4256  if (OpNum == OP_COPY) {
4257    if (LHSID == (1*9+2)*9+3) return LHS;
4258    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
4259    return RHS;
4260  }
4261
4262  SDValue OpLHS, OpRHS;
4263  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
4264  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
4265  EVT VT = OpLHS.getValueType();
4266
4267  switch (OpNum) {
4268  default: llvm_unreachable("Unknown shuffle opcode!");
4269  case OP_VREV:
4270    // VREV divides the vector in half and swaps within the half.
4271    if (VT.getVectorElementType() == MVT::i32 ||
4272        VT.getVectorElementType() == MVT::f32)
4273      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
4274    // vrev <4 x i16> -> VREV32
4275    if (VT.getVectorElementType() == MVT::i16)
4276      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
4277    // vrev <4 x i8> -> VREV16
4278    assert(VT.getVectorElementType() == MVT::i8);
4279    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
4280  case OP_VDUP0:
4281  case OP_VDUP1:
4282  case OP_VDUP2:
4283  case OP_VDUP3:
4284    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
4285                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
4286  case OP_VEXT1:
4287  case OP_VEXT2:
4288  case OP_VEXT3:
4289    return DAG.getNode(ARMISD::VEXT, dl, VT,
4290                       OpLHS, OpRHS,
4291                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
4292  case OP_VUZPL:
4293  case OP_VUZPR:
4294    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4295                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
4296  case OP_VZIPL:
4297  case OP_VZIPR:
4298    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4299                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
4300  case OP_VTRNL:
4301  case OP_VTRNR:
4302    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4303                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
4304  }
4305}
4306
4307static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
4308                                       SmallVectorImpl<int> &ShuffleMask,
4309                                       SelectionDAG &DAG) {
4310  // Check to see if we can use the VTBL instruction.
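  // VTBL performs a byte-wise table lookup: each index byte selects a byte
  // from the table register(s), and out-of-range indices produce zero, so
  // the shuffle mask can be used directly as the index vector.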
4311  SDValue V1 = Op.getOperand(0);
4312  SDValue V2 = Op.getOperand(1);
4313  DebugLoc DL = Op.getDebugLoc();
4314
4315  SmallVector<SDValue, 8> VTBLMask;
4316  for (SmallVectorImpl<int>::iterator
4317         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
4318    VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));
4319
4320  if (V2.getNode()->getOpcode() == ISD::UNDEF)
4321    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
4322                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
4323                                   &VTBLMask[0], 8));
4324
4325  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
4326                     DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
4327                                 &VTBLMask[0], 8));
4328}
4329
4330static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
4331  SDValue V1 = Op.getOperand(0);
4332  SDValue V2 = Op.getOperand(1);
4333  DebugLoc dl = Op.getDebugLoc();
4334  EVT VT = Op.getValueType();
4335  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
4336  SmallVector<int, 8> ShuffleMask;
4337
4338  // Convert shuffles that are directly supported on NEON to target-specific
4339  // DAG nodes, instead of keeping them as shuffles and matching them again
4340  // during code selection.  This is more efficient and avoids the possibility
4341  // of inconsistencies between legalization and selection.
4342  // FIXME: floating-point vectors should be canonicalized to integer vectors
4343  // of the same size so that they get CSEd properly.
4344  SVN->getMask(ShuffleMask);
4345
4346  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4347  if (EltSize <= 32) {
4348    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
4349      int Lane = SVN->getSplatIndex();
4350      // If this is an undef splat, generate it via "just" vdup, if possible.
4351      if (Lane == -1) Lane = 0;
4352
4353      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
4354        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
4355      }
4356      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
4357                         DAG.getConstant(Lane, MVT::i32));
4358    }
4359
4360    bool ReverseVEXT;
4361    unsigned Imm;
4362    if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
4363      if (ReverseVEXT)
4364        std::swap(V1, V2);
4365      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
4366                         DAG.getConstant(Imm, MVT::i32));
4367    }
4368
4369    if (isVREVMask(ShuffleMask, VT, 64))
4370      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
4371    if (isVREVMask(ShuffleMask, VT, 32))
4372      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
4373    if (isVREVMask(ShuffleMask, VT, 16))
4374      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
4375
4376    // Check for Neon shuffles that modify both input vectors in place.
4377    // If both results are used, i.e., if there are two shuffles with the same
4378    // source operands and with masks corresponding to both results of one of
4379    // these operations, DAG memoization will ensure that a single node is
4380    // used for both shuffles.
4381    unsigned WhichResult;
4382    if (isVTRNMask(ShuffleMask, VT, WhichResult))
4383      return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4384                         V1, V2).getValue(WhichResult);
4385    if (isVUZPMask(ShuffleMask, VT, WhichResult))
4386      return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4387                         V1, V2).getValue(WhichResult);
4388    if (isVZIPMask(ShuffleMask, VT, WhichResult))
4389      return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4390                         V1, V2).getValue(WhichResult);
4391
4392    if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
4393      return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4394                         V1, V1).getValue(WhichResult);
4395    if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
4396      return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4397                         V1, V1).getValue(WhichResult);
4398    if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
4399      return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4400                         V1, V1).getValue(WhichResult);
4401  }
4402
4403  // If the shuffle is not directly supported and it has 4 elements, use
4404  // the PerfectShuffle-generated table to synthesize it from other shuffles.
4405  unsigned NumElts = VT.getVectorNumElements();
4406  if (NumElts == 4) {
4407    unsigned PFIndexes[4];
4408    for (unsigned i = 0; i != 4; ++i) {
4409      if (ShuffleMask[i] < 0)
4410        PFIndexes[i] = 8;
4411      else
4412        PFIndexes[i] = ShuffleMask[i];
4413    }
4414
4415    // Compute the index in the perfect shuffle table.
4416    unsigned PFTableIndex =
4417      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
4418    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
4419    unsigned Cost = (PFEntry >> 30);
4420
4421    if (Cost <= 4)
4422      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
4423  }
4424
4425  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
4426  if (EltSize >= 32) {
4427    // Do the expansion with floating-point types, since that is what the VFP
4428    // registers are defined to use, and since i64 is not legal.
4429    EVT EltVT = EVT::getFloatingPointVT(EltSize);
4430    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
4431    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
4432    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
4433    SmallVector<SDValue, 8> Ops;
4434    for (unsigned i = 0; i < NumElts; ++i) {
4435      if (ShuffleMask[i] < 0)
4436        Ops.push_back(DAG.getUNDEF(EltVT));
4437      else
4438        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
4439                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
4440                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
4441                                                  MVT::i32)));
4442    }
4443    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
4444    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4445  }
4446
4447  if (VT == MVT::v8i8) {
4448    SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG);
4449    if (NewOp.getNode())
4450      return NewOp;
4451  }
4452
4453  return SDValue();
4454}
4455
4456static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
4457  // INSERT_VECTOR_ELT is legal only for immediate indexes.
4458  SDValue Lane = Op.getOperand(2);
4459  if (!isa<ConstantSDNode>(Lane))
4460    return SDValue();
4461
4462  return Op;
4463}
4464
4465static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
4466  // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
4467  SDValue Lane = Op.getOperand(1);
4468  if (!isa<ConstantSDNode>(Lane))
4469    return SDValue();
4470
4471  SDValue Vec = Op.getOperand(0);
4472  if (Op.getValueType() == MVT::i32 &&
4473      Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
4474    DebugLoc dl = Op.getDebugLoc();
4475    return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
4476  }
4477
4478  return Op;
4479}
4480
4481static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
4482  // The only time a CONCAT_VECTORS operation can have legal types is when
4483  // two 64-bit vectors are concatenated to a 128-bit vector.
4484  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
4485         "unexpected CONCAT_VECTORS");
4486  DebugLoc dl = Op.getDebugLoc();
4487  SDValue Val = DAG.getUNDEF(MVT::v2f64);
4488  SDValue Op0 = Op.getOperand(0);
4489  SDValue Op1 = Op.getOperand(1);
4490  if (Op0.getOpcode() != ISD::UNDEF)
4491    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
4492                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
4493                      DAG.getIntPtrConstant(0));
4494  if (Op1.getOpcode() != ISD::UNDEF)
4495    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
4496                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
4497                      DAG.getIntPtrConstant(1));
4498  return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
4499}
4500
4501/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
4502/// element has been zero/sign-extended, depending on the isSigned parameter,
4503/// from an integer type half its size.
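/// For example, the v4i16 constant <255, 1, 0, 128> qualifies as
/// zero-extended, since every element fits in the low 8 bits of its lane.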
4504static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
4505                                   bool isSigned) {
4506  // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
4507  EVT VT = N->getValueType(0);
4508  if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
4509    SDNode *BVN = N->getOperand(0).getNode();
4510    if (BVN->getValueType(0) != MVT::v4i32 ||
4511        BVN->getOpcode() != ISD::BUILD_VECTOR)
4512      return false;
4513    unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
4514    unsigned HiElt = 1 - LoElt;
4515    ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
4516    ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
4517    ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
4518    ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
4519    if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
4520      return false;
4521    if (isSigned) {
4522      if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
4523          Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
4524        return true;
4525    } else {
4526      if (Hi0->isNullValue() && Hi1->isNullValue())
4527        return true;
4528    }
4529    return false;
4530  }
4531
4532  if (N->getOpcode() != ISD::BUILD_VECTOR)
4533    return false;
4534
4535  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
4536    SDNode *Elt = N->getOperand(i).getNode();
4537    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
4538      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4539      unsigned HalfSize = EltSize / 2;
4540      if (isSigned) {
4541        if (!isIntN(HalfSize, C->getSExtValue()))
4542          return false;
4543      } else {
4544        if (!isUIntN(HalfSize, C->getZExtValue()))
4545          return false;
4546      }
4547      continue;
4548    }
4549    return false;
4550  }
4551
4552  return true;
4553}
4554
4555/// isSignExtended - Check if a node is a vector value that is sign-extended
4556/// or a constant BUILD_VECTOR with sign-extended elements.
4557static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
4558  if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
4559    return true;
4560  if (isExtendedBUILD_VECTOR(N, DAG, true))
4561    return true;
4562  return false;
4563}
4564
4565/// isZeroExtended - Check if a node is a vector value that is zero-extended
4566/// or a constant BUILD_VECTOR with zero-extended elements.
4567static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
4568  if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
4569    return true;
4570  if (isExtendedBUILD_VECTOR(N, DAG, false))
4571    return true;
4572  return false;
4573}
4574
4575/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending
4576/// load, or BUILD_VECTOR with extended elements, return the unextended value.
4577static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) {
4578  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
4579    return N->getOperand(0);
4580  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
4581    return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(),
4582                       LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
4583                       LD->isNonTemporal(), LD->getAlignment());
4584  // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
4585  // have been legalized as a BITCAST from v4i32.
4586  if (N->getOpcode() == ISD::BITCAST) {
4587    SDNode *BVN = N->getOperand(0).getNode();
4588    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
4589           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
4590    unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
4591    return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32,
4592                       BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
4593  }
4594  // Construct a new BUILD_VECTOR with elements truncated to half the size.
4595  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
4596  EVT VT = N->getValueType(0);
4597  unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
4598  unsigned NumElts = VT.getVectorNumElements();
4599  MVT TruncVT = MVT::getIntegerVT(EltSize);
4600  SmallVector<SDValue, 8> Ops;
4601  for (unsigned i = 0; i != NumElts; ++i) {
4602    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
4603    const APInt &CInt = C->getAPIntValue();
4604    Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT));
4605  }
4606  return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
4607                     MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts);
4608}
4609
4610static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
4611  unsigned Opcode = N->getOpcode();
4612  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4613    SDNode *N0 = N->getOperand(0).getNode();
4614    SDNode *N1 = N->getOperand(1).getNode();
4615    return N0->hasOneUse() && N1->hasOneUse() &&
4616      isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
4617  }
4618  return false;
4619}
4620
4621static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
4622  unsigned Opcode = N->getOpcode();
4623  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
4624    SDNode *N0 = N->getOperand(0).getNode();
4625    SDNode *N1 = N->getOperand(1).getNode();
4626    return N0->hasOneUse() && N1->hasOneUse() &&
4627      isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
4628  }
4629  return false;
4630}
4631
4632static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
4633  // Multiplications are only custom-lowered for 128-bit vectors so that
4634  // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
4635  EVT VT = Op.getValueType();
4636  assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL");
4637  SDNode *N0 = Op.getOperand(0).getNode();
4638  SDNode *N1 = Op.getOperand(1).getNode();
4639  unsigned NewOpc = 0;
4640  bool isMLA = false;
4641  bool isN0SExt = isSignExtended(N0, DAG);
4642  bool isN1SExt = isSignExtended(N1, DAG);
4643  if (isN0SExt && isN1SExt)
4644    NewOpc = ARMISD::VMULLs;
4645  else {
4646    bool isN0ZExt = isZeroExtended(N0, DAG);
4647    bool isN1ZExt = isZeroExtended(N1, DAG);
4648    if (isN0ZExt && isN1ZExt)
4649      NewOpc = ARMISD::VMULLu;
4650    else if (isN1SExt || isN1ZExt) {
4651      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
4652      // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
4653      if (isN1SExt && isAddSubSExt(N0, DAG)) {
4654        NewOpc = ARMISD::VMULLs;
4655        isMLA = true;
4656      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
4657        NewOpc = ARMISD::VMULLu;
4658        isMLA = true;
4659      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
4660        std::swap(N0, N1);
4661        NewOpc = ARMISD::VMULLu;
4662        isMLA = true;
4663      }
4664    }
4665
4666    if (!NewOpc) {
4667      if (VT == MVT::v2i64)
4668        // Fall through to expand this.  It is not legal.
4669        return SDValue();
4670      else
4671        // Other vector multiplications are legal.
4672        return Op;
4673    }
4674  }
4675
4676  // Legalize to a VMULL instruction.
4677  DebugLoc DL = Op.getDebugLoc();
4678  SDValue Op0;
4679  SDValue Op1 = SkipExtension(N1, DAG);
4680  if (!isMLA) {
4681    Op0 = SkipExtension(N0, DAG);
4682    assert(Op0.getValueType().is64BitVector() &&
4683           Op1.getValueType().is64BitVector() &&
4684           "unexpected types for extended operands to VMULL");
4685    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
4686  }
4687
4688  // Optimize (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
4689  // isel lowering to take advantage of no-stall back-to-back vmul + vmla.
4690  //   vmull q0, d4, d6
4691  //   vmlal q0, d5, d6
4692  // is faster than
4693  //   vaddl q0, d4, d5
4694  //   vmovl q1, d6
4695  //   vmul  q0, q0, q1
4696  SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG);
4697  SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG);
4698  EVT Op1VT = Op1.getValueType();
4699  return DAG.getNode(N0->getOpcode(), DL, VT,
4700                     DAG.getNode(NewOpc, DL, VT,
4701                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
4702                     DAG.getNode(NewOpc, DL, VT,
4703                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
4704}
4705
4706static SDValue
4707LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) {
4708  // Convert to float
4709  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
4710  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
4711  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
4712  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
4713  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
4714  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
4715  // Get reciprocal estimate.
4716  // float4 recip = vrecpeq_f32(yf);
4717  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4718                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y);
4719  // Because char has a smaller range than uchar, we can actually get away
4720  // without any Newton steps.  This requires that we use a weird bias
4721  // of 0xb000, however (again, this has been exhaustively tested).
4722  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
4723  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
4724  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
4725  Y = DAG.getConstant(0xb000, MVT::i32);
4726  Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
4727  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
4728  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
4729  // Convert back to short.
4730  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
4731  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
4732  return X;
4733}
4734
4735static SDValue
4736LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) {
4737  SDValue N2;
4738  // Convert to float.
4739  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
4740  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
4741  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
4742  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
4743  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4744  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4745
4746  // Use reciprocal estimate and one refinement step.
4747  // float4 recip = vrecpeq_f32(yf);
4748  // recip *= vrecpsq_f32(yf, recip);
4749  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4750                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
4751  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4752                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4753                   N1, N2);
4754  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4755  // Because short has a smaller range than ushort, we can actually get away
4756  // with only a single Newton step.  This requires that we use a weird bias
4757  // of 0x89, however (again, this has been exhaustively tested).
4758  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
4759  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4760  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4761  N1 = DAG.getConstant(0x89, MVT::i32);
4762  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4763  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4764  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4765  // Convert back to integer and return.
4766  // return vmovn_s32(vcvt_s32_f32(result));
4767  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4768  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4769  return N0;
4770}
4771
4772static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
4773  EVT VT = Op.getValueType();
4774  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
4775         "unexpected type for custom-lowering ISD::SDIV");
4776
4777  DebugLoc dl = Op.getDebugLoc();
4778  SDValue N0 = Op.getOperand(0);
4779  SDValue N1 = Op.getOperand(1);
4780  SDValue N2, N3;
4781
4782  if (VT == MVT::v8i8) {
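    // Widen to v8i16, split into two v4i16 halves, divide each half with the
    // 8-bit-range routine above, then concatenate and truncate back to v8i8.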
4783    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
4784    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
4785
4786    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4787                     DAG.getIntPtrConstant(4));
4788    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4789                     DAG.getIntPtrConstant(4));
4790    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4791                     DAG.getIntPtrConstant(0));
4792    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4793                     DAG.getIntPtrConstant(0));
4794
4795    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
4796    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
4797
4798    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
4799    N0 = LowerCONCAT_VECTORS(N0, DAG);
4800
4801    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
4802    return N0;
4803  }
4804  return LowerSDIV_v4i16(N0, N1, dl, DAG);
4805}
4806
4807static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
4808  EVT VT = Op.getValueType();
4809  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
4810         "unexpected type for custom-lowering ISD::UDIV");
4811
4812  DebugLoc dl = Op.getDebugLoc();
4813  SDValue N0 = Op.getOperand(0);
4814  SDValue N1 = Op.getOperand(1);
4815  SDValue N2, N3;
4816
4817  if (VT == MVT::v8i8) {
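    // Same structure as the v8i8 sdiv above: zero-extend to v8i16, divide
    // each v4i16 half via LowerSDIV_v4i16, then narrow the result with the
    // saturating vqmovun (arm_neon_vqmovnsu) instead of a plain truncate.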
4818    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
4819    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
4820
4821    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4822                     DAG.getIntPtrConstant(4));
4823    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4824                     DAG.getIntPtrConstant(4));
4825    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4826                     DAG.getIntPtrConstant(0));
4827    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4828                     DAG.getIntPtrConstant(0));
4829
4830    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
4831    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
4832
4833    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
4834    N0 = LowerCONCAT_VECTORS(N0, DAG);
4835
4836    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
4837                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
4838                     N0);
4839    return N0;
4840  }
4841
4842  // v4i16 udiv ... Convert to float.
4843  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
4844  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
4845  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
4846  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
4847  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4848  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4849
4850  // Use reciprocal estimate and two refinement steps.
4851  // float4 recip = vrecpeq_f32(yf);
4852  // recip *= vrecpsq_f32(yf, recip);
4853  // recip *= vrecpsq_f32(yf, recip);
4854  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4855                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
4856  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4857                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4858                   BN1, N2);
4859  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4860  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4861                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4862                   BN1, N2);
4863  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4864  // Simply multiplying by the reciprocal estimate can leave us a few ulps
4865  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
4866  // and that it will never cause us to return an answer too large).
4867  // float4 result = as_float4(as_int4(xf*recip) + 2);
4868  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4869  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4870  N1 = DAG.getConstant(2, MVT::i32);
4871  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4872  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4873  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4874  // Convert back to integer and return.
4875  // return vmovn_u32(vcvt_s32_f32(result));
4876  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4877  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4878  return N0;
4879}
4880
4881static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
4882  EVT VT = Op.getNode()->getValueType(0);
4883  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
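  // The second MVT::i32 result models the carry flag: ADDC/SUBC produce it,
  // and ADDE/SUBE both produce it and consume an incoming carry as a third
  // operand.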
4884
4885  unsigned Opc;
4886  bool ExtraOp = false;
4887  switch (Op.getOpcode()) {
4888  default: llvm_unreachable("Invalid code");
4889  case ISD::ADDC: Opc = ARMISD::ADDC; break;
4890  case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
4891  case ISD::SUBC: Opc = ARMISD::SUBC; break;
4892  case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
4893  }
4894
4895  if (!ExtraOp)
4896    return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4897                       Op.getOperand(1));
4898  return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4899                     Op.getOperand(1), Op.getOperand(2));
4900}
4901
4902static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
4903  // Monotonic load/store is legal for all targets
4904  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
4905    return Op;
4906
4907  // Acquire/Release load/store is not legal for targets without a
4908  // dmb or equivalent available.
4909  return SDValue();
4910}
4911
4912
4913static void
4914ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
4915                    SelectionDAG &DAG, unsigned NewOp) {
4916  DebugLoc dl = Node->getDebugLoc();
4917  assert(Node->getValueType(0) == MVT::i64 &&
4918         "Only know how to expand i64 atomics");
4919
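  // Split each i64 value operand into lo/hi i32 halves.  The target node
  // returns its result as two i32 halves plus a chain, which are re-joined
  // into an i64 with BUILD_PAIR below.  (These nodes are eventually expanded
  // into ldrexd/strexd loops.)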
4920  SmallVector<SDValue, 6> Ops;
4921  Ops.push_back(Node->getOperand(0)); // Chain
4922  Ops.push_back(Node->getOperand(1)); // Ptr
4923  // Low part of Val1
4924  Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4925                            Node->getOperand(2), DAG.getIntPtrConstant(0)));
4926  // High part of Val1
4927  Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4928                            Node->getOperand(2), DAG.getIntPtrConstant(1)));
4929  if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
4930    // Low part of Val2
4931    Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4932                              Node->getOperand(3), DAG.getIntPtrConstant(0)));
4933    // High part of Val2
4934    Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4935                              Node->getOperand(3), DAG.getIntPtrConstant(1)));
4936  }
4937  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4938  SDValue Result =
4939    DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
4940                            cast<MemSDNode>(Node)->getMemOperand());
4941  SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
4942  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
4943  Results.push_back(Result.getValue(2));
4944}
4945
4946SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4947  switch (Op.getOpcode()) {
4948  default: llvm_unreachable("Don't know how to custom lower this!");
4949  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
4950  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
4951  case ISD::GlobalAddress:
4952    return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
4953      LowerGlobalAddressELF(Op, DAG);
4954  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
4955  case ISD::SELECT:        return LowerSELECT(Op, DAG);
4956  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
4957  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
4958  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
4959  case ISD::VASTART:       return LowerVASTART(Op, DAG);
4960  case ISD::MEMBARRIER:    return LowerMEMBARRIER(Op, DAG, Subtarget);
4961  case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
4962  case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
4963  case ISD::SINT_TO_FP:
4964  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
4965  case ISD::FP_TO_SINT:
4966  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
4967  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
4968  case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
4969  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
4970  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
4971  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
4972  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
4973  case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
4974  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
4975                                                               Subtarget);
4976  case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG);
4977  case ISD::SHL:
4978  case ISD::SRL:
4979  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
4980  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
4981  case ISD::SRL_PARTS:
4982  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
4983  case ISD::CTTZ:          return LowerCTTZ(Op.getNode(), DAG, Subtarget);
4984  case ISD::SETCC:         return LowerVSETCC(Op, DAG);
4985  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
4986  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
4987  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
4988  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
4989  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
4990  case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
4991  case ISD::MUL:           return LowerMUL(Op, DAG);
4992  case ISD::SDIV:          return LowerSDIV(Op, DAG);
4993  case ISD::UDIV:          return LowerUDIV(Op, DAG);
4994  case ISD::ADDC:
4995  case ISD::ADDE:
4996  case ISD::SUBC:
4997  case ISD::SUBE:          return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
4998  case ISD::ATOMIC_LOAD:
4999  case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
5000  }
5001  return SDValue();
5002}
5003
5004/// ReplaceNodeResults - Replace the results of node with an illegal result
5005/// type with new values built out of custom code.
5006void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
5007                                           SmallVectorImpl<SDValue>&Results,
5008                                           SelectionDAG &DAG) const {
5009  SDValue Res;
5010  switch (N->getOpcode()) {
5011  default:
5012    llvm_unreachable("Don't know how to custom expand this!");
5013    break;
5014  case ISD::BITCAST:
5015    Res = ExpandBITCAST(N, DAG);
5016    break;
5017  case ISD::SRL:
5018  case ISD::SRA:
5019    Res = Expand64BitShift(N, DAG, Subtarget);
5020    break;
5021  case ISD::ATOMIC_LOAD_ADD:
5022    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG);
5023    return;
5024  case ISD::ATOMIC_LOAD_AND:
5025    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG);
5026    return;
5027  case ISD::ATOMIC_LOAD_NAND:
5028    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG);
5029    return;
5030  case ISD::ATOMIC_LOAD_OR:
5031    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG);
5032    return;
5033  case ISD::ATOMIC_LOAD_SUB:
5034    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG);
5035    return;
5036  case ISD::ATOMIC_LOAD_XOR:
5037    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG);
5038    return;
5039  case ISD::ATOMIC_SWAP:
5040    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG);
5041    return;
5042  case ISD::ATOMIC_CMP_SWAP:
5043    ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG);
5044    return;
5045  }
5046  if (Res.getNode())
5047    Results.push_back(Res);
5048}
5049
5050//===----------------------------------------------------------------------===//
5051//                           ARM Scheduler Hooks
5052//===----------------------------------------------------------------------===//
5053
5054MachineBasicBlock *
5055ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
5056                                     MachineBasicBlock *BB,
5057                                     unsigned Size) const {
5058  unsigned dest    = MI->getOperand(0).getReg();
5059  unsigned ptr     = MI->getOperand(1).getReg();
5060  unsigned oldval  = MI->getOperand(2).getReg();
5061  unsigned newval  = MI->getOperand(3).getReg();
5062  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5063  DebugLoc dl = MI->getDebugLoc();
5064  bool isThumb2 = Subtarget->isThumb2();
5065
5066  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5067  unsigned scratch =
5068    MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass
5069                                       : ARM::GPRRegisterClass);
5070
5071  if (isThumb2) {
5072    MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
5073    MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass);
5074    MRI.constrainRegClass(newval, ARM::rGPRRegisterClass);
5075  }
5076
5077  unsigned ldrOpc, strOpc;
5078  switch (Size) {
5079  default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
5080  case 1:
5081    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
5082    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
5083    break;
5084  case 2:
5085    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
5086    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
5087    break;
5088  case 4:
5089    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
5090    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
5091    break;
5092  }
5093
5094  MachineFunction *MF = BB->getParent();
5095  const BasicBlock *LLVM_BB = BB->getBasicBlock();
5096  MachineFunction::iterator It = BB;
5097  ++It; // insert the new blocks after the current block
5098
5099  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
5100  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
5101  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5102  MF->insert(It, loop1MBB);
5103  MF->insert(It, loop2MBB);
5104  MF->insert(It, exitMBB);
5105
5106  // Transfer the remainder of BB and its successor edges to exitMBB.
5107  exitMBB->splice(exitMBB->begin(), BB,
5108                  llvm::next(MachineBasicBlock::iterator(MI)),
5109                  BB->end());
5110  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5111
5112  //  thisMBB:
5113  //   ...
5114  //   fallthrough --> loop1MBB
5115  BB->addSuccessor(loop1MBB);
5116
5117  // loop1MBB:
5118  //   ldrex dest, [ptr]
5119  //   cmp dest, oldval
5120  //   bne exitMBB
5121  BB = loop1MBB;
5122  MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
5123  if (ldrOpc == ARM::t2LDREX)
5124    MIB.addImm(0);
5125  AddDefaultPred(MIB);
5126  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
5127                 .addReg(dest).addReg(oldval));
5128  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5129    .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5130  BB->addSuccessor(loop2MBB);
5131  BB->addSuccessor(exitMBB);
5132
5133  // loop2MBB:
5134  //   strex scratch, newval, [ptr]
5135  //   cmp scratch, #0
5136  //   bne loop1MBB
5137  BB = loop2MBB;
5138  MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr);
5139  if (strOpc == ARM::t2STREX)
5140    MIB.addImm(0);
5141  AddDefaultPred(MIB);
5142  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5143                 .addReg(scratch).addImm(0));
5144  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5145    .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5146  BB->addSuccessor(loop1MBB);
5147  BB->addSuccessor(exitMBB);
5148
5149  //  exitMBB:
5150  //   ...
5151  BB = exitMBB;
5152
5153  MI->eraseFromParent();   // The instruction is gone now.
5154
5155  return BB;
5156}
5157
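/// EmitAtomicBinary - Expand an ATOMIC_LOAD_<op>_* pseudo instruction (or an
/// ATOMIC_SWAP_*, indicated by BinOpcode == 0) into an ldrex/strex loop that
/// applies BinOpcode to the loaded value before attempting the store.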
5158MachineBasicBlock *
5159ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
5160                                    unsigned Size, unsigned BinOpcode) const {
5161  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
5162  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5163
5164  const BasicBlock *LLVM_BB = BB->getBasicBlock();
5165  MachineFunction *MF = BB->getParent();
5166  MachineFunction::iterator It = BB;
5167  ++It;
5168
5169  unsigned dest = MI->getOperand(0).getReg();
5170  unsigned ptr = MI->getOperand(1).getReg();
5171  unsigned incr = MI->getOperand(2).getReg();
5172  DebugLoc dl = MI->getDebugLoc();
5173  bool isThumb2 = Subtarget->isThumb2();
5174
5175  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5176  if (isThumb2) {
5177    MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
5178    MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
5179  }
5180
5181  unsigned ldrOpc, strOpc;
5182  switch (Size) {
5183  default: llvm_unreachable("unsupported size for AtomicBinary!");
5184  case 1:
5185    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
5186    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
5187    break;
5188  case 2:
5189    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
5190    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
5191    break;
5192  case 4:
5193    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
5194    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
5195    break;
5196  }
5197
5198  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5199  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5200  MF->insert(It, loopMBB);
5201  MF->insert(It, exitMBB);
5202
5203  // Transfer the remainder of BB and its successor edges to exitMBB.
5204  exitMBB->splice(exitMBB->begin(), BB,
5205                  llvm::next(MachineBasicBlock::iterator(MI)),
5206                  BB->end());
5207  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5208
5209  TargetRegisterClass *TRC =
5210    isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5211  unsigned scratch = MRI.createVirtualRegister(TRC);
5212  unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
5213
5214  //  thisMBB:
5215  //   ...
5216  //   fallthrough --> loopMBB
5217  BB->addSuccessor(loopMBB);
5218
5219  //  loopMBB:
5220  //   ldrex dest, ptr
5221  //   <binop> scratch2, dest, incr
5222  //   strex scratch, scratch2, ptr
5223  //   cmp scratch, #0
5224  //   bne- loopMBB
5225  //   fallthrough --> exitMBB
5226  BB = loopMBB;
5227  MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
5228  if (ldrOpc == ARM::t2LDREX)
5229    MIB.addImm(0);
5230  AddDefaultPred(MIB);
5231  if (BinOpcode) {
5232    // Operand order is reversed for NAND (BIC computes incr & ~dest).
5233    if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
5234      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
5235                     addReg(incr).addReg(dest)).addReg(0);
5236    else
5237      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
5238                     addReg(dest).addReg(incr)).addReg(0);
5239  }
5240
5241  MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
5242  if (strOpc == ARM::t2STREX)
5243    MIB.addImm(0);
5244  AddDefaultPred(MIB);
5245  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5246                 .addReg(scratch).addImm(0));
5247  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5248    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5249
5250  BB->addSuccessor(loopMBB);
5251  BB->addSuccessor(exitMBB);
5252
5253  //  exitMBB:
5254  //   ...
5255  BB = exitMBB;
5256
5257  MI->eraseFromParent();   // The instruction is gone now.
5258
5259  return BB;
5260}
5261
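/// EmitAtomicBinaryMinMax - Expand an ATOMIC_LOAD_{MIN,MAX,UMIN,UMAX}_*
/// pseudo instruction into an ldrex/cmp/cmov/strex loop that keeps either the
/// loaded value or the operand, as selected by Cond. For sub-word sizes the
/// loaded value is sign-extended first when signExtend is set.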
5262MachineBasicBlock *
5263ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
5264                                          MachineBasicBlock *BB,
5265                                          unsigned Size,
5266                                          bool signExtend,
5267                                          ARMCC::CondCodes Cond) const {
5268  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5269
5270  const BasicBlock *LLVM_BB = BB->getBasicBlock();
5271  MachineFunction *MF = BB->getParent();
5272  MachineFunction::iterator It = BB;
5273  ++It;
5274
5275  unsigned dest = MI->getOperand(0).getReg();
5276  unsigned ptr = MI->getOperand(1).getReg();
5277  unsigned incr = MI->getOperand(2).getReg();
5278  unsigned oldval = dest;
5279  DebugLoc dl = MI->getDebugLoc();
5280  bool isThumb2 = Subtarget->isThumb2();
5281
5282  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5283  if (isThumb2) {
5284    MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
5285    MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
5286  }
5287
5288  unsigned ldrOpc, strOpc, extendOpc;
5289  switch (Size) {
5290  default: llvm_unreachable("unsupported size for AtomicBinaryMinMax!");
5291  case 1:
5292    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
5293    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
5294    extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
5295    break;
5296  case 2:
5297    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
5298    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
5299    extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
5300    break;
5301  case 4:
5302    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
5303    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
5304    extendOpc = 0;
5305    break;
5306  }
5307
5308  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5309  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5310  MF->insert(It, loopMBB);
5311  MF->insert(It, exitMBB);
5312
5313  // Transfer the remainder of BB and its successor edges to exitMBB.
5314  exitMBB->splice(exitMBB->begin(), BB,
5315                  llvm::next(MachineBasicBlock::iterator(MI)),
5316                  BB->end());
5317  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5318
5319  TargetRegisterClass *TRC =
5320    isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5321  unsigned scratch = MRI.createVirtualRegister(TRC);
5322  unsigned scratch2 = MRI.createVirtualRegister(TRC);
5323
5324  //  thisMBB:
5325  //   ...
5326  //   fallthrough --> loopMBB
5327  BB->addSuccessor(loopMBB);
5328
5329  //  loopMBB:
5330  //   ldrex dest, ptr
5331  //   (sign extend dest, if required)
5332  //   cmp dest, incr
5333  //   cmov.cond scratch2, dest, incr
5334  //   strex scratch, scratch2, ptr
5335  //   cmp scratch, #0
5336  //   bne- loopMBB
5337  //   fallthrough --> exitMBB
5338  BB = loopMBB;
5339  MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
5340  if (ldrOpc == ARM::t2LDREX)
5341    MIB.addImm(0);
5342  AddDefaultPred(MIB);
5343
5344  // Sign extend the value, if necessary.
5345  if (signExtend && extendOpc) {
5346    oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass);
5347    AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval)
5348                     .addReg(dest)
5349                     .addImm(0));
5350  }
5351
5352  // Build compare and cmov instructions.
5353  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
5354                 .addReg(oldval).addReg(incr));
5355  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2)
5356         .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR);
5357
5358  MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr);
5359  if (strOpc == ARM::t2STREX)
5360    MIB.addImm(0);
5361  AddDefaultPred(MIB);
5362  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5363                 .addReg(scratch).addImm(0));
5364  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5365    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5366
5367  BB->addSuccessor(loopMBB);
5368  BB->addSuccessor(exitMBB);
5369
5370  //  exitMBB:
5371  //   ...
5372  BB = exitMBB;
5373
5374  MI->eraseFromParent();   // The instruction is gone now.
5375
5376  return BB;
5377}
5378
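/// EmitAtomicBinary64 - Expand a 64-bit atomic pseudo instruction into an
/// ldrexd/strexd loop. Op1 and Op2 are applied to the low and high halves of
/// the value (both 0 for ATOMIC_SWAP), NeedsCarry chains the carry flag from
/// the low half into the high half, and IsCmpxchg instead inserts an
/// early-exit comparison against the expected value.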
5379MachineBasicBlock *
5380ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
5381                                      unsigned Op1, unsigned Op2,
5382                                      bool NeedsCarry, bool IsCmpxchg) const {
5383  // This also handles ATOMIC_SWAP, indicated by Op1==0.
5384  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5385
5386  const BasicBlock *LLVM_BB = BB->getBasicBlock();
5387  MachineFunction *MF = BB->getParent();
5388  MachineFunction::iterator It = BB;
5389  ++It;
5390
5391  unsigned destlo = MI->getOperand(0).getReg();
5392  unsigned desthi = MI->getOperand(1).getReg();
5393  unsigned ptr = MI->getOperand(2).getReg();
5394  unsigned vallo = MI->getOperand(3).getReg();
5395  unsigned valhi = MI->getOperand(4).getReg();
5396  DebugLoc dl = MI->getDebugLoc();
5397  bool isThumb2 = Subtarget->isThumb2();
5398
5399  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
5400  if (isThumb2) {
5401    MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass);
5402    MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass);
5403    MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
5404  }
5405
5406  unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD;
5407  unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD;
5408
5409  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5410  MachineBasicBlock *contBB = 0, *cont2BB = 0;
5411  if (IsCmpxchg) {
5412    contBB = MF->CreateMachineBasicBlock(LLVM_BB);
5413    cont2BB = MF->CreateMachineBasicBlock(LLVM_BB);
5414  }
5415  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
5416  MF->insert(It, loopMBB);
5417  if (IsCmpxchg) {
5418    MF->insert(It, contBB);
5419    MF->insert(It, cont2BB);
5420  }
5421  MF->insert(It, exitMBB);
5422
5423  // Transfer the remainder of BB and its successor edges to exitMBB.
5424  exitMBB->splice(exitMBB->begin(), BB,
5425                  llvm::next(MachineBasicBlock::iterator(MI)),
5426                  BB->end());
5427  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5428
5429  TargetRegisterClass *TRC =
5430    isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5431  unsigned storesuccess = MRI.createVirtualRegister(TRC);
5432
5433  //  thisMBB:
5434  //   ...
5435  //   fallthrough --> loopMBB
5436  BB->addSuccessor(loopMBB);
5437
5438  //  loopMBB:
5439  //   ldrexd r2, r3, ptr
5440  //   <binopa> r0, r2, incr
5441  //   <binopb> r1, r3, incr
5442  //   strexd storesuccess, r0, r1, ptr
5443  //   cmp storesuccess, #0
5444  //   bne- loopMBB
5445  //   fallthrough --> exitMBB
5446  //
5447  // Note that the registers are explicitly specified because there is no
5448  // other way to force the register allocator to allocate a register pair.
5449  //
5450  // FIXME: The hardcoded registers are not necessary for Thumb2, but we
5451  // need to properly enforce the restriction that the two output registers
5452  // for ldrexd must be different.
5453  BB = loopMBB;
5454  // Load
5455  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
5456                 .addReg(ARM::R2, RegState::Define)
5457                 .addReg(ARM::R3, RegState::Define).addReg(ptr));
5458  // Copy r2/r3 into dest.  (This copy will normally be coalesced.)
5459  BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2);
5460  BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3);
5461
5462  if (IsCmpxchg) {
5463    // Add early exit
5464    for (unsigned i = 0; i < 2; i++) {
5465      AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr :
5466                                                         ARM::CMPrr))
5467                     .addReg(i == 0 ? destlo : desthi)
5468                     .addReg(i == 0 ? vallo : valhi));
5469      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5470        .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5471      BB->addSuccessor(exitMBB);
5472      BB->addSuccessor(i == 0 ? contBB : cont2BB);
5473      BB = (i == 0 ? contBB : cont2BB);
5474    }
5475
5476    // Copy to physregs for strexd
5477    unsigned setlo = MI->getOperand(5).getReg();
5478    unsigned sethi = MI->getOperand(6).getReg();
5479    BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo);
5480    BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi);
5481  } else if (Op1) {
5482    // Perform binary operation
5483    AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0)
5484                   .addReg(destlo).addReg(vallo))
5485        .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry));
5486    AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1)
5487                   .addReg(desthi).addReg(valhi)).addReg(0);
5488  } else {
5489    // Copy to physregs for strexd
5490    BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo);
5491    BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi);
5492  }
5493
5494  // Store
5495  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess)
5496                 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr));
5497  // Cmp+jump
5498  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5499                 .addReg(storesuccess).addImm(0));
5500  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5501    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
5502
5503  BB->addSuccessor(loopMBB);
5504  BB->addSuccessor(exitMBB);
5505
5506  //  exitMBB:
5507  //   ...
5508  BB = exitMBB;
5509
5510  MI->eraseFromParent();   // The instruction is gone now.
5511
5512  return BB;
5513}
5514
5515/// EmitBasePointerRecalculation - For functions using a base pointer, we
5516/// rematerialize it (via the frame pointer).
5517void ARMTargetLowering::
5518EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB,
5519                             MachineBasicBlock *DispatchBB) const {
5520  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5521  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
5522  MachineFunction &MF = *MI->getParent()->getParent();
5523  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
5524  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
5525
5526  if (!RI.hasBasePointer(MF)) return;
5527
5528  MachineBasicBlock::iterator MBBI = MI;
5529
5530  int32_t NumBytes = AFI->getFramePtrSpillOffset();
5531  unsigned FramePtr = RI.getFrameRegister(MF);
5532  assert(MF.getTarget().getFrameLowering()->hasFP(MF) &&
5533         "Base pointer without frame pointer?");
5534
5535  if (AFI->isThumb2Function())
5536    llvm::emitT2RegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
5537                                 FramePtr, -NumBytes, ARMCC::AL, 0, *AII);
5538  else if (AFI->isThumbFunction())
5539    llvm::emitThumbRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
5540                                    FramePtr, -NumBytes, *AII, RI);
5541  else
5542    llvm::emitARMRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
5543                                  FramePtr, -NumBytes, ARMCC::AL, 0, *AII);
5544
5545  if (!RI.needsStackRealignment(MF)) return;
5546
5547  // If there's dynamic realignment, adjust for it.
5548  MachineFrameInfo *MFI = MF.getFrameInfo();
5549  unsigned MaxAlign = MFI->getMaxAlignment();
5550  assert(!AFI->isThumb1OnlyFunction());
5551
5552  // Emit bic r6, r6, #(MaxAlign - 1) to align r6 down to MaxAlign.
5553  unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri;
5554  AddDefaultCC(
5555    AddDefaultPred(
5556      BuildMI(*MBB, MBBI, MI->getDebugLoc(), TII->get(bicOpc), ARM::R6)
5557      .addReg(ARM::R6, RegState::Kill)
5558      .addImm(MaxAlign - 1)));
5559}
5560
5561/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
5562/// registers the function context.
5563void ARMTargetLowering::
5564SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
5565                       MachineBasicBlock *DispatchBB, int FI) const {
5566  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5567  DebugLoc dl = MI->getDebugLoc();
5568  MachineFunction *MF = MBB->getParent();
5569  MachineRegisterInfo *MRI = &MF->getRegInfo();
5570  MachineConstantPool *MCP = MF->getConstantPool();
5571  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
5572  const Function *F = MF->getFunction();
5573
5574  bool isThumb = Subtarget->isThumb();
5575  bool isThumb2 = Subtarget->isThumb2();
5576
5577  unsigned PCLabelId = AFI->createPICLabelUId();
5578  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
5579  ARMConstantPoolValue *CPV =
5580    ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
5581  unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
5582
5583  const TargetRegisterClass *TRC =
5584    isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5585
5586  // Grab constant pool and fixed stack memory operands.
5587  MachineMemOperand *CPMMO =
5588    MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(),
5589                             MachineMemOperand::MOLoad, 4, 4);
5590
5591  MachineMemOperand *FIMMOSt =
5592    MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
5593                             MachineMemOperand::MOStore, 4, 4);
5594
5595  EmitBasePointerRecalculation(MI, MBB, DispatchBB);
5596
5597  // Load the address of the dispatch MBB into the jump buffer.
5598  if (isThumb2) {
5599    // Incoming value: jbuf
5600    //   ldr.n  r5, LCPI1_1
5601    //   orr    r5, r5, #1
5602    //   add    r5, pc
5603    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
5604    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5605    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
5606                   .addConstantPoolIndex(CPI)
5607                   .addMemOperand(CPMMO));
5608    // Set the low bit because of thumb mode.
5609    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5610    AddDefaultCC(
5611      AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
5612                     .addReg(NewVReg1, RegState::Kill)
5613                     .addImm(0x01)));
5614    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5615    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
5616      .addReg(NewVReg2, RegState::Kill)
5617      .addImm(PCLabelId);
5618    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
5619                   .addReg(NewVReg3, RegState::Kill)
5620                   .addFrameIndex(FI)
5621                   .addImm(36)  // &jbuf[1] :: pc
5622                   .addMemOperand(FIMMOSt));
5623  } else if (isThumb) {
5624    // Incoming value: jbuf
5625    //   ldr.n  r1, LCPI1_4
5626    //   add    r1, pc
5627    //   mov    r2, #1
5628    //   orrs   r1, r2
5629    //   add    r2, $jbuf, #+4 ; &jbuf[1]
5630    //   str    r1, [r2]
5631    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5632    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
5633                   .addConstantPoolIndex(CPI)
5634                   .addMemOperand(CPMMO));
5635    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5636    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
5637      .addReg(NewVReg1, RegState::Kill)
5638      .addImm(PCLabelId);
5639    // Set the low bit because of thumb mode.
5640    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5641    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
5642                   .addReg(ARM::CPSR, RegState::Define)
5643                   .addImm(1));
5644    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5645    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
5646                   .addReg(ARM::CPSR, RegState::Define)
5647                   .addReg(NewVReg2, RegState::Kill)
5648                   .addReg(NewVReg3, RegState::Kill));
5649    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
5650    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5)
5651                   .addFrameIndex(FI)
5652                   .addImm(36)); // &jbuf[1] :: pc
5653    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
5654                   .addReg(NewVReg4, RegState::Kill)
5655                   .addReg(NewVReg5, RegState::Kill)
5656                   .addImm(0)
5657                   .addMemOperand(FIMMOSt));
5658  } else {
5659    // Incoming value: jbuf
5660    //   ldr  r1, LCPI1_1
5661    //   add  r1, pc, r1
5662    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
5663    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5664    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12),  NewVReg1)
5665                   .addConstantPoolIndex(CPI)
5666                   .addImm(0)
5667                   .addMemOperand(CPMMO));
5668    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5669    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
5670                   .addReg(NewVReg1, RegState::Kill)
5671                   .addImm(PCLabelId));
5672    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
5673                   .addReg(NewVReg2, RegState::Kill)
5674                   .addFrameIndex(FI)
5675                   .addImm(36)  // &jbuf[1] :: pc
5676                   .addMemOperand(FIMMOSt));
5677  }
5678}
5679
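/// EmitSjLjDispatchBlock - Build the dispatch machinery for SjLj exception
/// handling: gather every landing pad in the function, emit a bounds-checked
/// jump table indexed by the call site number found in the function context,
/// and reroute each invoke's landing-pad edge through the new dispatch block.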
5680MachineBasicBlock *ARMTargetLowering::
5681EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
5682  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5683  DebugLoc dl = MI->getDebugLoc();
5684  MachineFunction *MF = MBB->getParent();
5685  MachineRegisterInfo *MRI = &MF->getRegInfo();
5686  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
5687  MachineFrameInfo *MFI = MF->getFrameInfo();
5688  int FI = MFI->getFunctionContextIndex();
5689
5690  const TargetRegisterClass *TRC =
5691    Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
5692
5693  // Get a mapping of the call site numbers to all of the landing pads they're
5694  // associated with.
5695  DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad;
5696  unsigned MaxCSNum = 0;
5697  MachineModuleInfo &MMI = MF->getMMI();
5698  for (MachineFunction::iterator BB = MF->begin(), E = MF->end();
       BB != E; ++BB) {
5699    if (!BB->isLandingPad()) continue;
5700
5701    // FIXME: We should assert that the EH_LABEL is the first MI in the landing
5702    // pad.
5703    for (MachineBasicBlock::iterator
5704           II = BB->begin(), IE = BB->end(); II != IE; ++II) {
5705      if (!II->isEHLabel()) continue;
5706
5707      MCSymbol *Sym = II->getOperand(0).getMCSymbol();
5708      if (!MMI.hasCallSiteLandingPad(Sym)) continue;
5709
5710      SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym);
5711      for (SmallVectorImpl<unsigned>::iterator
5712             CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
5713           CSI != CSE; ++CSI) {
5714        CallSiteNumToLPad[*CSI].push_back(BB);
5715        MaxCSNum = std::max(MaxCSNum, *CSI);
5716      }
5717      break;
5718    }
5719  }
5720
5721  // Get an ordered list of the machine basic blocks for the jump table.
5722  std::vector<MachineBasicBlock*> LPadList;
5723  SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs;
5724  LPadList.reserve(CallSiteNumToLPad.size());
5725  for (unsigned I = 1; I <= MaxCSNum; ++I) {
5726    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
5727    for (SmallVectorImpl<MachineBasicBlock*>::iterator
5728           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
5729      LPadList.push_back(*II);
5730      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
5731    }
5732  }
5733
5734  assert(!LPadList.empty() &&
5735         "No landing pad destinations for the dispatch jump table!");
5736
5737  // Create the jump table and associated information.
5738  MachineJumpTableInfo *JTI =
5739    MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
5740  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
5741  unsigned UId = AFI->createJumpTableUId();
5742
5743  // Create the MBBs for the dispatch code.
5744
5745  // Shove the dispatch's address into the return slot in the function context.
5746  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
5747  DispatchBB->setIsLandingPad();
5748
5749  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
5750  BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP));
5751  DispatchBB->addSuccessor(TrapBB);
5752
5753  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
5754  DispatchBB->addSuccessor(DispContBB);
5755
5756  // Insert the new MBBs at the end of the function.
5757  MF->insert(MF->end(), DispatchBB);
5758  MF->insert(MF->end(), DispContBB);
5759  MF->insert(MF->end(), TrapBB);
5760
5761  // Insert code into the entry block that creates and registers the function
5762  // context.
5763  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
5764
5765  MachineMemOperand *FIMMOLd =
5766    MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
5767                             MachineMemOperand::MOLoad |
5768                             MachineMemOperand::MOVolatile, 4, 4);
5769
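  // The dispatch code below loads the call site index out of the function
  // context and range-checks it; an index above NumLPads does not correspond
  // to any landing pad and branches to the trap block instead.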
5770  unsigned NumLPads = LPadList.size();
5771  if (Subtarget->isThumb2()) {
5772    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5773    AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
5774                   .addFrameIndex(FI)
5775                   .addImm(4)
5776                   .addMemOperand(FIMMOLd));
5777
5778    if (NumLPads < 256) {
5779      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
5780                     .addReg(NewVReg1)
5781                     .addImm(NumLPads));
5782    } else {
5783      unsigned VReg1 = MRI->createVirtualRegister(TRC);
5784      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
5785                     .addImm(NumLPads & 0xFFFF));
5786
5787      unsigned VReg2 = VReg1;
5788      if ((NumLPads & 0xFFFF0000) != 0) {
5789        VReg2 = MRI->createVirtualRegister(TRC);
5790        AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
5791                       .addReg(VReg1)
5792                       .addImm(NumLPads >> 16));
5793      }
5794
5795      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
5796                     .addReg(NewVReg1)
5797                     .addReg(VReg2));
5798    }
5799
5800    BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
5801      .addMBB(TrapBB)
5802      .addImm(ARMCC::HI)
5803      .addReg(ARM::CPSR);
5804
5805    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5806    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3)
5807                   .addJumpTableIndex(MJTI)
5808                   .addImm(UId));
5809
5810    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5811    AddDefaultCC(
5812      AddDefaultPred(
5813        BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
5814        .addReg(NewVReg3, RegState::Kill)
5815        .addReg(NewVReg1)
5816        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
5817
5818    BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
5819      .addReg(NewVReg4, RegState::Kill)
5820      .addReg(NewVReg1)
5821      .addJumpTableIndex(MJTI)
5822      .addImm(UId);
5823  } else if (Subtarget->isThumb()) {
5824    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5825    AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
5826                   .addFrameIndex(FI)
5827                   .addImm(1)
5828                   .addMemOperand(FIMMOLd));
5829
5830    if (NumLPads < 256) {
5831      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
5832                     .addReg(NewVReg1)
5833                     .addImm(NumLPads));
5834    } else {
5835      MachineConstantPool *ConstantPool = MF->getConstantPool();
5836      Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
5837      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
5838
5839      // MachineConstantPool wants an explicit alignment.
5840      unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
5841      if (Align == 0)
5842        Align = getTargetData()->getTypeAllocSize(C->getType());
5843      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
5844
5845      unsigned VReg1 = MRI->createVirtualRegister(TRC);
5846      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
5847                     .addReg(VReg1, RegState::Define)
5848                     .addConstantPoolIndex(Idx));
5849      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
5850                     .addReg(NewVReg1)
5851                     .addReg(VReg1));
5852    }
5853
5854    BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
5855      .addMBB(TrapBB)
5856      .addImm(ARMCC::HI)
5857      .addReg(ARM::CPSR);
5858
5859    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
5860    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
5861                   .addReg(ARM::CPSR, RegState::Define)
5862                   .addReg(NewVReg1)
5863                   .addImm(2));
5864
5865    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5866    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
5867                   .addJumpTableIndex(MJTI)
5868                   .addImm(UId));
5869
5870    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5871    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
5872                   .addReg(ARM::CPSR, RegState::Define)
5873                   .addReg(NewVReg2, RegState::Kill)
5874                   .addReg(NewVReg3));
5875
5876    MachineMemOperand *JTMMOLd =
5877      MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(),
5878                               MachineMemOperand::MOLoad, 4, 4);
5879
5880    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
5881    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
5882                   .addReg(NewVReg4, RegState::Kill)
5883                   .addImm(0)
5884                   .addMemOperand(JTMMOLd));
5885
5886    unsigned NewVReg6 = MRI->createVirtualRegister(TRC);
5887    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
5888                   .addReg(ARM::CPSR, RegState::Define)
5889                   .addReg(NewVReg5, RegState::Kill)
5890                   .addReg(NewVReg3));
5891
5892    BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
5893      .addReg(NewVReg6, RegState::Kill)
5894      .addJumpTableIndex(MJTI)
5895      .addImm(UId);
5896  } else {
5897    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
5898    AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
5899                   .addFrameIndex(FI)
5900                   .addImm(4)
5901                   .addMemOperand(FIMMOLd));
5902
5903    if (NumLPads < 256) {
5904      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
5905                     .addReg(NewVReg1)
5906                     .addImm(NumLPads));
5907    } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
5908      unsigned VReg1 = MRI->createVirtualRegister(TRC);
5909      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
5910                     .addImm(NumLPads & 0xFFFF));
5911
5912      unsigned VReg2 = VReg1;
5913      if ((NumLPads & 0xFFFF0000) != 0) {
5914        VReg2 = MRI->createVirtualRegister(TRC);
5915        AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
5916                       .addReg(VReg1)
5917                       .addImm(NumLPads >> 16));
5918      }
5919
5920      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
5921                     .addReg(NewVReg1)
5922                     .addReg(VReg2));
5923    } else {
5924      MachineConstantPool *ConstantPool = MF->getConstantPool();
5925      Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
5926      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
5927
5928      // MachineConstantPool wants an explicit alignment.
5929      unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
5930      if (Align == 0)
5931        Align = getTargetData()->getTypeAllocSize(C->getType());
5932      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
5933
5934      unsigned VReg1 = MRI->createVirtualRegister(TRC);
5935      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
5936                     .addReg(VReg1, RegState::Define)
5937                     .addConstantPoolIndex(Idx)
5938                     .addImm(0));
5939      AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
5940                     .addReg(NewVReg1)
5941                     .addReg(VReg1, RegState::Kill));
5942    }
5943
5944    BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
5945      .addMBB(TrapBB)
5946      .addImm(ARMCC::HI)
5947      .addReg(ARM::CPSR);
5948
5949    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
5950    AddDefaultCC(
5951      AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
5952                     .addReg(NewVReg1)
5953                     .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
5954    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
5955    AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
5956                   .addJumpTableIndex(MJTI)
5957                   .addImm(UId));
5958
5959    MachineMemOperand *JTMMOLd =
5960      MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(),
5961                               MachineMemOperand::MOLoad, 4, 4);
5962    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
5963    AddDefaultPred(
5964      BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
5965      .addReg(NewVReg3, RegState::Kill)
5966      .addReg(NewVReg4)
5967      .addImm(0)
5968      .addMemOperand(JTMMOLd));
5969
5970    BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
5971      .addReg(NewVReg5, RegState::Kill)
5972      .addReg(NewVReg4)
5973      .addJumpTableIndex(MJTI)
5974      .addImm(UId);
5975  }
5976
5977  // Add the jump table entries as successors to the MBB.
5978  MachineBasicBlock *PrevMBB = 0;
5979  for (std::vector<MachineBasicBlock*>::iterator
5980         I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
5981    MachineBasicBlock *CurMBB = *I;
5982    if (PrevMBB != CurMBB)
5983      DispContBB->addSuccessor(CurMBB);
5984    PrevMBB = CurMBB;
5985  }
5986
5987  // N.B. the order the invoke BBs are processed in doesn't matter here.
5988  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
5989  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
5990  const unsigned *SavedRegs = RI.getCalleeSavedRegs(MF);
5991  SmallVector<MachineBasicBlock*, 64> MBBLPads;
5992  for (SmallPtrSet<MachineBasicBlock*, 64>::iterator
5993         I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) {
5994    MachineBasicBlock *BB = *I;
5995
5996    // Remove the landing pad successor from the invoke block and replace it
5997    // with the new dispatch block.
5998    SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
5999                                                  BB->succ_end());
6000    while (!Successors.empty()) {
6001      MachineBasicBlock *SMBB = Successors.pop_back_val();
6002      if (SMBB->isLandingPad()) {
6003        BB->removeSuccessor(SMBB);
6004        MBBLPads.push_back(SMBB);
6005      }
6006    }
6007
6008    BB->addSuccessor(DispatchBB);
6009
6010    // Find the invoke call and mark all of the callee-saved registers as
6011    // 'implicit defined' so that they're spilled. This prevents later passes
6012    // from moving instructions to before the EH block, where they would
6013    // never be executed.
6014    for (MachineBasicBlock::reverse_iterator
6015           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
6016      if (!II->getDesc().isCall()) continue;
6017
6018      DenseMap<unsigned, bool> DefRegs;
6019      for (MachineInstr::mop_iterator
6020             OI = II->operands_begin(), OE = II->operands_end();
6021           OI != OE; ++OI) {
6022        if (!OI->isReg()) continue;
6023        DefRegs[OI->getReg()] = true;
6024      }
6025
6026      MachineInstrBuilder MIB(&*II);
6027
6028      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
6029        unsigned Reg = SavedRegs[i];
6030        if (Subtarget->isThumb2() &&
6031            !ARM::tGPRRegisterClass->contains(Reg) &&
6032            !ARM::hGPRRegisterClass->contains(Reg))
6033          continue;
6034        else if (Subtarget->isThumb1Only() &&
6035                 !ARM::tGPRRegisterClass->contains(Reg))
6036          continue;
6037        else if (!Subtarget->isThumb() &&
6038                 !ARM::GPRRegisterClass->contains(Reg))
6039          continue;
6040        if (!DefRegs[Reg])
6041          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
6042      }
6043
6044      break;
6045    }
6046  }
6047
6048  // Mark all former landing pads as non-landing pads. The dispatch is the only
6049  // landing pad now.
6050  for (SmallVectorImpl<MachineBasicBlock*>::iterator
6051         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
6052    (*I)->setIsLandingPad(false);
6053
6054  // The instruction is gone now.
6055  MI->eraseFromParent();
6056
6057  return MBB;
6058}
6059
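/// OtherSucc - Return the successor of MBB other than Succ. MBB is expected
/// to have exactly two successors.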
6060static
6061MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
6062  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
6063       E = MBB->succ_end(); I != E; ++I)
6064    if (*I != Succ)
6065      return *I;
6066  llvm_unreachable("Expecting a BB with two successors!");
6067}
6068
6069MachineBasicBlock *
6070ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
6071                                               MachineBasicBlock *BB) const {
6072  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
6073  DebugLoc dl = MI->getDebugLoc();
6074  bool isThumb2 = Subtarget->isThumb2();
6075  switch (MI->getOpcode()) {
6076  default: {
6077    MI->dump();
6078    llvm_unreachable("Unexpected instr type to insert");
6079  }
6080  // The Thumb2 pre-indexed stores have the same MI operands as the real
6081  // instructions; they are just defined differently in the .td files than
6082  // the isel patterns expect, so pseudos are used and remapped here.
6083  case ARM::t2STR_preidx:
6084    MI->setDesc(TII->get(ARM::t2STR_PRE));
6085    return BB;
6086  case ARM::t2STRB_preidx:
6087    MI->setDesc(TII->get(ARM::t2STRB_PRE));
6088    return BB;
6089  case ARM::t2STRH_preidx:
6090    MI->setDesc(TII->get(ARM::t2STRH_PRE));
6091    return BB;
6092
6093  case ARM::STRi_preidx:
6094  case ARM::STRBi_preidx: {
6095    unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
6096      ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
6097    // Decode the offset.
6098    unsigned Offset = MI->getOperand(4).getImm();
6099    bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
6100    Offset = ARM_AM::getAM2Offset(Offset);
6101    if (isSub)
6102      Offset = -Offset;
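    // e.g. an AM2-encoded "sub, #4" offset decodes to an immediate of -4.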
6103
6104    MachineMemOperand *MMO = *MI->memoperands_begin();
6105    BuildMI(*BB, MI, dl, TII->get(NewOpc))
6106      .addOperand(MI->getOperand(0))  // Rn_wb
6107      .addOperand(MI->getOperand(1))  // Rt
6108      .addOperand(MI->getOperand(2))  // Rn
6109      .addImm(Offset)                 // offset (skip GPR==zero_reg)
6110      .addOperand(MI->getOperand(5))  // pred
6111      .addOperand(MI->getOperand(6))
6112      .addMemOperand(MMO);
6113    MI->eraseFromParent();
6114    return BB;
6115  }
6116  case ARM::STRr_preidx:
6117  case ARM::STRBr_preidx:
6118  case ARM::STRH_preidx: {
6119    unsigned NewOpc;
6120    switch (MI->getOpcode()) {
6121    default: llvm_unreachable("unexpected opcode!");
6122    case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
6123    case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
6124    case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
6125    }
6126    MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
6127    for (unsigned i = 0; i < MI->getNumOperands(); ++i)
6128      MIB.addOperand(MI->getOperand(i));
6129    MI->eraseFromParent();
6130    return BB;
6131  }
6132  case ARM::ATOMIC_LOAD_ADD_I8:
6133     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
6134  case ARM::ATOMIC_LOAD_ADD_I16:
6135     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
6136  case ARM::ATOMIC_LOAD_ADD_I32:
6137     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
6138
6139  case ARM::ATOMIC_LOAD_AND_I8:
6140     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6141  case ARM::ATOMIC_LOAD_AND_I16:
6142     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6143  case ARM::ATOMIC_LOAD_AND_I32:
6144     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6145
6146  case ARM::ATOMIC_LOAD_OR_I8:
6147     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6148  case ARM::ATOMIC_LOAD_OR_I16:
6149     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6150  case ARM::ATOMIC_LOAD_OR_I32:
6151     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6152
6153  case ARM::ATOMIC_LOAD_XOR_I8:
6154     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6155  case ARM::ATOMIC_LOAD_XOR_I16:
6156     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6157  case ARM::ATOMIC_LOAD_XOR_I32:
6158     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6159
6160  case ARM::ATOMIC_LOAD_NAND_I8:
6161     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
6162  case ARM::ATOMIC_LOAD_NAND_I16:
6163     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
6164  case ARM::ATOMIC_LOAD_NAND_I32:
6165     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
6166
6167  case ARM::ATOMIC_LOAD_SUB_I8:
6168     return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
6169  case ARM::ATOMIC_LOAD_SUB_I16:
6170     return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
6171  case ARM::ATOMIC_LOAD_SUB_I32:
6172     return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
6173
6174  case ARM::ATOMIC_LOAD_MIN_I8:
6175     return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT);
6176  case ARM::ATOMIC_LOAD_MIN_I16:
6177     return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT);
6178  case ARM::ATOMIC_LOAD_MIN_I32:
6179     return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT);
6180
6181  case ARM::ATOMIC_LOAD_MAX_I8:
6182     return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT);
6183  case ARM::ATOMIC_LOAD_MAX_I16:
6184     return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT);
6185  case ARM::ATOMIC_LOAD_MAX_I32:
6186     return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT);
6187
6188  case ARM::ATOMIC_LOAD_UMIN_I8:
6189     return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO);
6190  case ARM::ATOMIC_LOAD_UMIN_I16:
6191     return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO);
6192  case ARM::ATOMIC_LOAD_UMIN_I32:
6193     return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO);
6194
6195  case ARM::ATOMIC_LOAD_UMAX_I8:
6196     return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI);
6197  case ARM::ATOMIC_LOAD_UMAX_I16:
6198     return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI);
6199  case ARM::ATOMIC_LOAD_UMAX_I32:
6200     return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI);
6201
6202  case ARM::ATOMIC_SWAP_I8:  return EmitAtomicBinary(MI, BB, 1, 0);
6203  case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
6204  case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
6205
6206  case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(MI, BB, 1);
6207  case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
6208  case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
6209
6210
6211  case ARM::ATOMADD6432:
6212    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr,
6213                              isThumb2 ? ARM::t2ADCrr : ARM::ADCrr,
6214                              /*NeedsCarry*/ true);
6215  case ARM::ATOMSUB6432:
6216    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
6217                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
6218                              /*NeedsCarry*/ true);
6219  case ARM::ATOMOR6432:
6220    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr,
6221                              isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
6222  case ARM::ATOMXOR6432:
6223    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr,
6224                              isThumb2 ? ARM::t2EORrr : ARM::EORrr);
6225  case ARM::ATOMAND6432:
6226    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
6227                              isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
6228  case ARM::ATOMSWAP6432:
6229    return EmitAtomicBinary64(MI, BB, 0, 0, false);
6230  case ARM::ATOMCMPXCHG6432:
6231    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
6232                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
6233                              /*NeedsCarry*/ false, /*IsCmpxchg*/true);
6234
6235  case ARM::tMOVCCr_pseudo: {
6236    // To "insert" a SELECT_CC instruction, we actually have to insert the
6237    // diamond control-flow pattern.  The incoming instruction knows the
6238    // destination vreg to set, the condition code register to branch on, the
6239    // true/false values to select between, and a branch opcode to use.
6240    const BasicBlock *LLVM_BB = BB->getBasicBlock();
6241    MachineFunction::iterator It = BB;
6242    ++It;
6243
6244    //  thisMBB:
6245    //  ...
6246    //   TrueVal = ...
6247    //   cmpTY ccX, r1, r2
6248    //   bCC copy1MBB
6249    //   fallthrough --> copy0MBB
6250    MachineBasicBlock *thisMBB  = BB;
6251    MachineFunction *F = BB->getParent();
6252    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
6253    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
6254    F->insert(It, copy0MBB);
6255    F->insert(It, sinkMBB);
6256
6257    // Transfer the remainder of BB and its successor edges to sinkMBB.
6258    sinkMBB->splice(sinkMBB->begin(), BB,
6259                    llvm::next(MachineBasicBlock::iterator(MI)),
6260                    BB->end());
6261    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
6262
6263    BB->addSuccessor(copy0MBB);
6264    BB->addSuccessor(sinkMBB);
6265
6266    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
6267      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
6268
6269    //  copy0MBB:
6270    //   %FalseValue = ...
6271    //   # fallthrough to sinkMBB
6272    BB = copy0MBB;
6273
6274    // Update machine-CFG edges
6275    BB->addSuccessor(sinkMBB);
6276
6277    //  sinkMBB:
6278    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
6279    //  ...
6280    BB = sinkMBB;
6281    BuildMI(*BB, BB->begin(), dl,
6282            TII->get(ARM::PHI), MI->getOperand(0).getReg())
6283      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
6284      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
6285
6286    MI->eraseFromParent();   // The pseudo instruction is gone now.
6287    return BB;
6288  }
6289
6290  case ARM::BCCi64:
6291  case ARM::BCCZi64: {
6292    // If there is an unconditional branch to the other successor, remove it.
6293    BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
6294
6295    // Compare both parts that make up the double comparison separately for
6296    // equality.
6297    bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
6298
6299    unsigned LHS1 = MI->getOperand(1).getReg();
6300    unsigned LHS2 = MI->getOperand(2).getReg();
6301    if (RHSisZero) {
6302      AddDefaultPred(BuildMI(BB, dl,
6303                             TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
6304                     .addReg(LHS1).addImm(0));
6305      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
6306        .addReg(LHS2).addImm(0)
6307        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
6308    } else {
6309      unsigned RHS1 = MI->getOperand(3).getReg();
6310      unsigned RHS2 = MI->getOperand(4).getReg();
6311      AddDefaultPred(BuildMI(BB, dl,
6312                             TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
6313                     .addReg(LHS1).addReg(RHS1));
6314      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
6315        .addReg(LHS2).addReg(RHS2)
6316        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
6317    }
6318
6319    MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB();
6320    MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
6321    if (MI->getOperand(0).getImm() == ARMCC::NE)
6322      std::swap(destMBB, exitMBB);
6323
6324    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
6325      .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
6326    if (isThumb2)
6327      AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB));
6328    else
6329      BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
6330
6331    MI->eraseFromParent();   // The pseudo instruction is gone now.
6332    return BB;
6333  }
6334
6335  case ARM::Int_eh_sjlj_setjmp:
6336  case ARM::Int_eh_sjlj_setjmp_nofp:
6337  case ARM::tInt_eh_sjlj_setjmp:
6338  case ARM::t2Int_eh_sjlj_setjmp:
6339  case ARM::t2Int_eh_sjlj_setjmp_nofp:
6340    EmitSjLjDispatchBlock(MI, BB);
6341    return BB;
6342
6343  case ARM::ABS:
6344  case ARM::t2ABS: {
6345    // To insert an ABS instruction, we have to insert the
6346    // diamond control-flow pattern.  The incoming instruction knows the
6347    // source vreg to test against 0, the destination vreg to set,
6348    // the condition code register to branch on, the
6349    // true/false values to select between, and a branch opcode to use.
6350    // It transforms
6351    //     V1 = ABS V0
6352    // into
6353    //     V2 = MOVS V0
6354    //     BCC                      (branch to SinkBB if V0 >= 0)
6355    //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
6356    //     SinkBB: V1 = PHI(V2, V3)
6357    const BasicBlock *LLVM_BB = BB->getBasicBlock();
6358    MachineFunction::iterator BBI = BB;
6359    ++BBI;
6360    MachineFunction *Fn = BB->getParent();
6361    MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
6362    MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
6363    Fn->insert(BBI, RSBBB);
6364    Fn->insert(BBI, SinkBB);
6365
6366    unsigned ABSSrcReg = MI->getOperand(1).getReg();
6367    unsigned ABSDstReg = MI->getOperand(0).getReg();
6368    bool isThumb2 = Subtarget->isThumb2();
6369    MachineRegisterInfo &MRI = Fn->getRegInfo();
6370    // In Thumb mode S must not be specified if source register is the SP or
6371    // PC and if destination register is the SP, so restrict register class
6372    unsigned NewMovDstReg = MRI.createVirtualRegister(
6373      isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass);
6374    unsigned NewRsbDstReg = MRI.createVirtualRegister(
6375      isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass);
6376
6377    // Transfer the remainder of BB and its successor edges to SinkBB.
6378    SinkBB->splice(SinkBB->begin(), BB,
6379      llvm::next(MachineBasicBlock::iterator(MI)),
6380      BB->end());
6381    SinkBB->transferSuccessorsAndUpdatePHIs(BB);
6382
6383    BB->addSuccessor(RSBBB);
6384    BB->addSuccessor(SinkBB);
6385
6386    // fall through to SinkBB
6387    RSBBB->addSuccessor(SinkBB);
6388
6389    // insert a movs at the end of BB
6390    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVr : ARM::MOVr),
6391      NewMovDstReg)
6392      .addReg(ABSSrcReg, RegState::Kill)
6393      .addImm((unsigned)ARMCC::AL).addReg(0)
6394      .addReg(ARM::CPSR, RegState::Define);
6395
6396    // insert a bcc with opposite CC to ARMCC::MI at the end of BB
6397    BuildMI(BB, dl,
6398      TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
6399      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
6400
6401    // insert rsbri in RSBBB
6402    // Note: BCC and rsbri will be converted into predicated rsbmi
6403    // by if-conversion pass
6404    BuildMI(*RSBBB, RSBBB->begin(), dl,
6405      TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
6406      .addReg(NewMovDstReg, RegState::Kill)
6407      .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
6408
6409    // insert PHI in SinkBB,
6410    // reuse ABSDstReg to not change uses of ABS instruction
6411    BuildMI(*SinkBB, SinkBB->begin(), dl,
6412      TII->get(ARM::PHI), ABSDstReg)
6413      .addReg(NewRsbDstReg).addMBB(RSBBB)
6414      .addReg(NewMovDstReg).addMBB(BB);
6415
6416    // remove ABS instruction
6417    MI->eraseFromParent();
6418
6419    // return last added BB
6420    return SinkBB;
6421  }
6422  }
6423}
6424
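/// AdjustInstrPostInstrSelection - Hook run after instruction selection on
/// instructions whose descriptions are marked 'hasPostISelHook'; here it is
/// used to wire up the optional cc_out operand of the flag-setting
/// ADC/SBC/RSB/RSC pseudos (see the comments below).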
6425void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
6426                                                      SDNode *Node) const {
6427  const MCInstrDesc *MCID = &MI->getDesc();
6428  if (!MCID->hasPostISelHook()) {
6429    assert(!convertAddSubFlagsOpcode(MI->getOpcode()) &&
6430           "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
6431    return;
6432  }
6433
6434  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
6435  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
6436  // operand is still set to noreg. If needed, set the optional operand's
6437  // register to CPSR, and remove the redundant implicit def.
6438  //
6439  // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
6440
6441  // Rename pseudo opcodes.
6442  unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
6443  if (NewOpc) {
6444    const ARMBaseInstrInfo *TII =
6445      static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo());
6446    MCID = &TII->get(NewOpc);
6447
6448    assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
6449           "converted opcode should be the same except for cc_out");
6450
6451    MI->setDesc(*MCID);
6452
6453    // Add the optional cc_out operand
6454    MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
6455  }
6456  unsigned ccOutIdx = MCID->getNumOperands() - 1;
6457
6458  // Any ARM instruction that sets the 's' bit should specify an optional
6459  // "cc_out" operand in the last operand position.
6460  if (!MCID->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
6461    assert(!NewOpc && "Optional cc_out operand required");
6462    return;
6463  }
6464  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
6465  // since we already have an optional CPSR def.
6466  bool definesCPSR = false;
6467  bool deadCPSR = false;
6468  for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands();
6469       i != e; ++i) {
6470    const MachineOperand &MO = MI->getOperand(i);
6471    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
6472      definesCPSR = true;
6473      if (MO.isDead())
6474        deadCPSR = true;
6475      MI->RemoveOperand(i);
6476      break;
6477    }
6478  }
6479  if (!definesCPSR) {
6480    assert(!NewOpc && "Optional cc_out operand required");
6481    return;
6482  }
6483  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
6484  if (deadCPSR) {
6485    assert(!MI->getOperand(ccOutIdx).getReg() &&
6486           "expect uninitialized optional cc_out operand");
6487    return;
6488  }
6489
6490  // If this instruction was defined with an optional CPSR def and its dag node
6491  // had a live implicit CPSR def, then activate the optional CPSR def.
6492  MachineOperand &MO = MI->getOperand(ccOutIdx);
6493  MO.setReg(ARM::CPSR);
6494  MO.setIsDef(true);
6495}
6496
6497//===----------------------------------------------------------------------===//
6498//                           ARM Optimization Hooks
6499//===----------------------------------------------------------------------===//
6500
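/// combineSelectAndUse - Helper for the ADD/SUB combines below.  A sketch of
/// the folds it performs for its callers (PerformADDCombine and
/// PerformSUBCombine), where Slct is a SELECT or SELECT_CC with a zero
/// operand and OtherOp is the other operand of N:
///   (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
///   (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))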
6501static
6502SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6503                            TargetLowering::DAGCombinerInfo &DCI) {
6504  SelectionDAG &DAG = DCI.DAG;
6505  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6506  EVT VT = N->getValueType(0);
6507  unsigned Opc = N->getOpcode();
6508  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
6509  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
6510  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
6511  ISD::CondCode CC = ISD::SETCC_INVALID;
6512
6513  if (isSlctCC) {
6514    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
6515  } else {
6516    SDValue CCOp = Slct.getOperand(0);
6517    if (CCOp.getOpcode() == ISD::SETCC)
6518      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
6519  }
6520
6521  bool DoXform = false;
6522  bool InvCC = false;
6523  assert((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
6524         "Bad input!");
6525
6526  if (LHS.getOpcode() == ISD::Constant &&
6527      cast<ConstantSDNode>(LHS)->isNullValue()) {
6528    DoXform = true;
6529  } else if (CC != ISD::SETCC_INVALID &&
6530             RHS.getOpcode() == ISD::Constant &&
6531             cast<ConstantSDNode>(RHS)->isNullValue()) {
6532    std::swap(LHS, RHS);
6533    SDValue Op0 = Slct.getOperand(0);
6534    EVT OpVT = isSlctCC ? Op0.getValueType() :
6535                          Op0.getOperand(0).getValueType();
6536    bool isInt = OpVT.isInteger();
6537    CC = ISD::getSetCCInverse(CC, isInt);
6538
6539    if (!TLI.isCondCodeLegal(CC, OpVT))
6540      return SDValue();         // Inverse operator isn't legal.
6541
6542    DoXform = true;
6543    InvCC = true;
6544  }
6545
6546  if (DoXform) {
6547    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
6548    if (isSlctCC)
6549      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
6550                             Slct.getOperand(0), Slct.getOperand(1), CC);
6551    SDValue CCOp = Slct.getOperand(0);
6552    if (InvCC)
6553      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
6554                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
6555    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
6556                       CCOp, OtherOp, Result);
6557  }
6558  return SDValue();
6559}
6560
6561// AddCombineToVPADDL - For a pairwise add on NEON, use the vpaddl
6562// instruction (only after legalization).
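// An illustrative DAG-level instance of the pattern matched below, where the
// EXTRACT_VECTOR_ELT indices name even/odd lanes of the same input vector v:
//   (add (build_vector (extract v, 0), (extract v, 2)),
//        (build_vector (extract v, 1), (extract v, 3)))
//     -> (truncate (int_arm_neon_vpaddls v))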
6563static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
6564                                 TargetLowering::DAGCombinerInfo &DCI,
6565                                 const ARMSubtarget *Subtarget) {
6566
6567  // Only perform this optimization after legalization, and only if NEON is
6568  // available. We also expect both operands to be BUILD_VECTORs.
6569  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
6570      || N0.getOpcode() != ISD::BUILD_VECTOR
6571      || N1.getOpcode() != ISD::BUILD_VECTOR)
6572    return SDValue();
6573
6574  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
6575  EVT VT = N->getValueType(0);
6576  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
6577    return SDValue();
6578
6579  // Check that the vector operands are of the right form.
6580  // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT operands,
6581  // where N is the number of elements in the formed vector.
6582  // Each EXTRACT_VECTOR_ELT should reference the same input vector, with N0
6583  // taking the even indices and N1 the odd ones (a pairwise add pattern).
6584
6585  // Grab the vector that all EXTRACT_VECTOR_ELT nodes should be referencing.
6586  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
6587    return SDValue();
6588  SDValue Vec = N0->getOperand(0)->getOperand(0);
6589  SDNode *V = Vec.getNode();
6590  unsigned nextIndex = 0;
6591
6592  // For each operand of the two BUILD_VECTORs, check that it is an
6593  // EXTRACT_VECTOR_ELT of the same input vector with the appropriate
6594  // (even/odd) index.
6595  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
6596    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
6597        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
6598
6599      SDValue ExtVec0 = N0->getOperand(i);
6600      SDValue ExtVec1 = N1->getOperand(i);
6601
6602      // The first operand is the vector; verify it is the same.
6603      if (V != ExtVec0->getOperand(0).getNode() ||
6604          V != ExtVec1->getOperand(0).getNode())
6605        return SDValue();
6606
6607      // The second operand is the constant index; verify it is correct.
6608      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
6609      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
6610
6611      // Verify the constants: N0 holds the even indices and N1 the odd ones.
6612      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
6613          || C1->getZExtValue() != nextIndex+1)
6614        return SDValue();
6615
6616      // Increment index.
6617      nextIndex += 2;
6618    } else
6619      return SDValue();
6620  }
6621
6622  // Create VPADDL node.
6623  SelectionDAG &DAG = DCI.DAG;
6624  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6625
6626  // Build operand list.
6627  SmallVector<SDValue, 8> Ops;
6628  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
6629                                TLI.getPointerTy()));
6630
6631  // Input is the vector.
6632  Ops.push_back(Vec);
6633
6634  // Get widened type and narrowed type.
6635  MVT widenType;
6636  unsigned numElem = VT.getVectorNumElements();
6637  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
6638    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
6639    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
6640    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
6641    default:
6642      llvm_unreachable("Invalid vector element type for padd optimization.");
6643  }
6644
6645  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
6646                            widenType, &Ops[0], Ops.size());
6647  return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
6648}
6649
6650/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
6651/// operands N0 and N1.  This is a helper for PerformADDCombine that is
6652/// called with the default operands, and if that fails, with commuted
6653/// operands.
6654static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
6655                                          TargetLowering::DAGCombinerInfo &DCI,
6656                                          const ARMSubtarget *Subtarget){
6657
6658  // Attempt to create vpaddl for this add.
6659  SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
6660  if (Result.getNode())
6661    return Result;
6662
6663  // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
6664  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
6665    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
6666    if (Result.getNode()) return Result;
6667  }
6668  return SDValue();
6669}
6670
6671/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
6672///
6673static SDValue PerformADDCombine(SDNode *N,
6674                                 TargetLowering::DAGCombinerInfo &DCI,
6675                                 const ARMSubtarget *Subtarget) {
6676  SDValue N0 = N->getOperand(0);
6677  SDValue N1 = N->getOperand(1);
6678
6679  // First try with the default operand order.
6680  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget);
6681  if (Result.getNode())
6682    return Result;
6683
6684  // If that didn't work, try again with the operands commuted.
6685  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
6686}
6687
6688/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
6689///
6690static SDValue PerformSUBCombine(SDNode *N,
6691                                 TargetLowering::DAGCombinerInfo &DCI) {
6692  SDValue N0 = N->getOperand(0);
6693  SDValue N1 = N->getOperand(1);
6694
6695  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
6696  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
6697    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
6698    if (Result.getNode()) return Result;
6699  }
6700
6701  return SDValue();
6702}
6703
6704/// PerformVMULCombine
6705/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
6706/// special multiplier accumulator forwarding.
6707///   vmul d3, d0, d2
6708///   vmla d3, d1, d2
6709/// is faster than
6710///   vadd d3, d0, d1
6711///   vmul d3, d3, d2
6712static SDValue PerformVMULCombine(SDNode *N,
6713                                  TargetLowering::DAGCombinerInfo &DCI,
6714                                  const ARMSubtarget *Subtarget) {
6715  if (!Subtarget->hasVMLxForwarding())
6716    return SDValue();
6717
6718  SelectionDAG &DAG = DCI.DAG;
6719  SDValue N0 = N->getOperand(0);
6720  SDValue N1 = N->getOperand(1);
6721  unsigned Opcode = N0.getOpcode();
6722  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
6723      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
6724    Opcode = N1.getOpcode();
6725    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
6726        Opcode != ISD::FADD && Opcode != ISD::FSUB)
6727      return SDValue();
6728    std::swap(N0, N1);
6729  }
6730
6731  EVT VT = N->getValueType(0);
6732  DebugLoc DL = N->getDebugLoc();
6733  SDValue N00 = N0->getOperand(0);
6734  SDValue N01 = N0->getOperand(1);
6735  return DAG.getNode(Opcode, DL, VT,
6736                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
6737                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
6738}
6739
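/// PerformMULCombine - Rewrite (mul x, C) where C is (2^N +/- 1), possibly
/// shifted left, as shift/add or shift/sub sequences.  A sketch of the cases
/// handled below:
///   (mul x, 9)  -> (add (shl x, 3), x)
///   (mul x, 14) -> (shl (sub (shl x, 3), x), 1)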
6740static SDValue PerformMULCombine(SDNode *N,
6741                                 TargetLowering::DAGCombinerInfo &DCI,
6742                                 const ARMSubtarget *Subtarget) {
6743  SelectionDAG &DAG = DCI.DAG;
6744
6745  if (Subtarget->isThumb1Only())
6746    return SDValue();
6747
6748  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
6749    return SDValue();
6750
6751  EVT VT = N->getValueType(0);
6752  if (VT.is64BitVector() || VT.is128BitVector())
6753    return PerformVMULCombine(N, DCI, Subtarget);
6754  if (VT != MVT::i32)
6755    return SDValue();
6756
6757  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6758  if (!C)
6759    return SDValue();
6760
6761  uint64_t MulAmt = C->getZExtValue();
6762  unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
6763  ShiftAmt = ShiftAmt & (32 - 1);
6764  SDValue V = N->getOperand(0);
6765  DebugLoc DL = N->getDebugLoc();
6766
6767  SDValue Res;
6768  MulAmt >>= ShiftAmt;
6769  if (isPowerOf2_32(MulAmt - 1)) {
6770    // (mul x, 2^N + 1) => (add (shl x, N), x)
6771    Res = DAG.getNode(ISD::ADD, DL, VT,
6772                      V, DAG.getNode(ISD::SHL, DL, VT,
6773                                     V, DAG.getConstant(Log2_32(MulAmt-1),
6774                                                        MVT::i32)));
6775  } else if (isPowerOf2_32(MulAmt + 1)) {
6776    // (mul x, 2^N - 1) => (sub (shl x, N), x)
6777    Res = DAG.getNode(ISD::SUB, DL, VT,
6778                      DAG.getNode(ISD::SHL, DL, VT,
6779                                  V, DAG.getConstant(Log2_32(MulAmt+1),
6780                                                     MVT::i32)),
6781                                                     V);
6782  } else
6783    return SDValue();
6784
6785  if (ShiftAmt != 0)
6786    Res = DAG.getNode(ISD::SHL, DL, VT, Res,
6787                      DAG.getConstant(ShiftAmt, MVT::i32));
6788
6789  // Do not add new nodes to DAG combiner worklist.
6790  DCI.CombineTo(N, Res, false);
6791  return SDValue();
6792}
6793
6794static SDValue PerformANDCombine(SDNode *N,
6795                                TargetLowering::DAGCombinerInfo &DCI) {
6796
6797  // Attempt to use immediate-form VBIC
6798  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
6799  DebugLoc dl = N->getDebugLoc();
6800  EVT VT = N->getValueType(0);
6801  SelectionDAG &DAG = DCI.DAG;
6802
6803  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
6804    return SDValue();
6805
6806  APInt SplatBits, SplatUndef;
6807  unsigned SplatBitSize;
6808  bool HasAnyUndefs;
6809  if (BVN &&
6810      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6811    if (SplatBitSize <= 64) {
6812      EVT VbicVT;
6813      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
6814                                      SplatUndef.getZExtValue(), SplatBitSize,
6815                                      DAG, VbicVT, VT.is128BitVector(),
6816                                      OtherModImm);
6817      if (Val.getNode()) {
6818        SDValue Input =
6819          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
6820        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
6821        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
6822      }
6823    }
6824  }
6825
6826  return SDValue();
6827}
6828
6829/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
6830static SDValue PerformORCombine(SDNode *N,
6831                                TargetLowering::DAGCombinerInfo &DCI,
6832                                const ARMSubtarget *Subtarget) {
6833  // Attempt to use immediate-form VORR
6834  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
6835  DebugLoc dl = N->getDebugLoc();
6836  EVT VT = N->getValueType(0);
6837  SelectionDAG &DAG = DCI.DAG;
6838
6839  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
6840    return SDValue();
6841
6842  APInt SplatBits, SplatUndef;
6843  unsigned SplatBitSize;
6844  bool HasAnyUndefs;
6845  if (BVN && Subtarget->hasNEON() &&
6846      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6847    if (SplatBitSize <= 64) {
6848      EVT VorrVT;
6849      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
6850                                      SplatUndef.getZExtValue(), SplatBitSize,
6851                                      DAG, VorrVT, VT.is128BitVector(),
6852                                      OtherModImm);
6853      if (Val.getNode()) {
6854        SDValue Input =
6855          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
6856        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
6857        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
6858      }
6859    }
6860  }
6861
6862  SDValue N0 = N->getOperand(0);
6863  if (N0.getOpcode() != ISD::AND)
6864    return SDValue();
6865  SDValue N1 = N->getOperand(1);
6866
6867  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
6868  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
6869      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
6870    APInt SplatUndef;
6871    unsigned SplatBitSize;
6872    bool HasAnyUndefs;
6873
6874    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
6875    APInt SplatBits0;
6876    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
6877                                  HasAnyUndefs) && !HasAnyUndefs) {
6878      BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
6879      APInt SplatBits1;
6880      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
6881                                    HasAnyUndefs) && !HasAnyUndefs &&
6882          SplatBits0 == ~SplatBits1) {
6883        // Canonicalize the vector type to make instruction selection simpler.
6884        EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
6885        SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
6886                                     N0->getOperand(1), N0->getOperand(0),
6887                                     N1->getOperand(0));
6888        return DAG.getNode(ISD::BITCAST, dl, VT, Result);
6889      }
6890    }
6891  }
6892
6893  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
6894  // reasonable.
6895
6896  // BFI is only available on V6T2+
6897  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
6898    return SDValue();
6899
6900  DebugLoc DL = N->getDebugLoc();
6901  // 1) or (and A, mask), val => ARMbfi A, val, mask
6902  //      iff (val & mask) == val
6903  //
6904  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
6905  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
6906  //          && mask == ~mask2
6907  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
6908  //          && ~mask == mask2
6909  //  (i.e., copy a bitfield value into another bitfield of the same width)
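  // An illustrative instance of case (1), assuming i32 values:
  //   (or (and X, 0xffff00ff), 0x00001200) -> ARMbfi X, 0x12, 0xffff00ff
  // i.e. insert the 8-bit field 0x12 into bits 8-15 of X.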
6910
6911  if (VT != MVT::i32)
6912    return SDValue();
6913
6914  SDValue N00 = N0.getOperand(0);
6915
6916  // The value and the mask need to be constants so we can verify this is
6917  // actually a bitfield set. If the mask is 0xffff, we can do better
6918  // via a movt instruction, so don't use BFI in that case.
6919  SDValue MaskOp = N0.getOperand(1);
6920  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
6921  if (!MaskC)
6922    return SDValue();
6923  unsigned Mask = MaskC->getZExtValue();
6924  if (Mask == 0xffff)
6925    return SDValue();
6926  SDValue Res;
6927  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
6928  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
6929  if (N1C) {
6930    unsigned Val = N1C->getZExtValue();
6931    if ((Val & ~Mask) != Val)
6932      return SDValue();
6933
6934    if (ARM::isBitFieldInvertedMask(Mask)) {
6935      Val >>= CountTrailingZeros_32(~Mask);
6936
6937      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
6938                        DAG.getConstant(Val, MVT::i32),
6939                        DAG.getConstant(Mask, MVT::i32));
6940
6941      // Do not add new nodes to DAG combiner worklist.
6942      DCI.CombineTo(N, Res, false);
6943      return SDValue();
6944    }
6945  } else if (N1.getOpcode() == ISD::AND) {
6946    // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
6947    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
6948    if (!N11C)
6949      return SDValue();
6950    unsigned Mask2 = N11C->getZExtValue();
6951
6952    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
6953    // as is to match.
6954    if (ARM::isBitFieldInvertedMask(Mask) &&
6955        (Mask == ~Mask2)) {
6956      // The pack halfword instruction works better for masks that fit it,
6957      // so use that when it's available.
6958      if (Subtarget->hasT2ExtractPack() &&
6959          (Mask == 0xffff || Mask == 0xffff0000))
6960        return SDValue();
6961      // 2a
6962      unsigned amt = CountTrailingZeros_32(Mask2);
6963      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
6964                        DAG.getConstant(amt, MVT::i32));
6965      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
6966                        DAG.getConstant(Mask, MVT::i32));
6967      // Do not add new nodes to DAG combiner worklist.
6968      DCI.CombineTo(N, Res, false);
6969      return SDValue();
6970    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
6971               (~Mask == Mask2)) {
6972      // The pack halfword instruction works better for masks that fit it,
6973      // so use that when it's available.
6974      if (Subtarget->hasT2ExtractPack() &&
6975          (Mask2 == 0xffff || Mask2 == 0xffff0000))
6976        return SDValue();
6977      // 2b
6978      unsigned lsb = CountTrailingZeros_32(Mask);
6979      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
6980                        DAG.getConstant(lsb, MVT::i32));
6981      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
6982                        DAG.getConstant(Mask2, MVT::i32));
6983      // Do not add new nodes to DAG combiner worklist.
6984      DCI.CombineTo(N, Res, false);
6985      return SDValue();
6986    }
6987  }
6988
6989  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
6990      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
6991      ARM::isBitFieldInvertedMask(~Mask)) {
6992    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
6993    // where lsb(mask) == #shamt and masked bits of B are known zero.
6994    SDValue ShAmt = N00.getOperand(1);
6995    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
6996    unsigned LSB = CountTrailingZeros_32(Mask);
6997    if (ShAmtC != LSB)
6998      return SDValue();
6999
7000    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
7001                      DAG.getConstant(~Mask, MVT::i32));
7002
7003    // Do not add new nodes to DAG combiner worklist.
7004    DCI.CombineTo(N, Res, false);
7005  }
7006
7007  return SDValue();
7008}
7009
7010/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
7011/// the bits being cleared by the AND are not demanded by the BFI.
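/// For example (a sketch, with i32 operands):
///   (bfi A, (and B, 0xff), 0xffffff00) -> (bfi A, B, 0xffffff00)
/// since the AND only clears bits that the BFI does not demand.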
7012static SDValue PerformBFICombine(SDNode *N,
7013                                 TargetLowering::DAGCombinerInfo &DCI) {
7014  SDValue N1 = N->getOperand(1);
7015  if (N1.getOpcode() == ISD::AND) {
7016    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
7017    if (!N11C)
7018      return SDValue();
7019    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
7020    unsigned LSB = CountTrailingZeros_32(~InvMask);
7021    unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB;
7022    unsigned Mask = (1 << Width)-1;
7023    unsigned Mask2 = N11C->getZExtValue();
7024    if ((Mask & (~Mask2)) == 0)
7025      return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0),
7026                             N->getOperand(0), N1.getOperand(0),
7027                             N->getOperand(2));
7028  }
7029  return SDValue();
7030}
7031
7032/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
7033/// ARMISD::VMOVRRD.
7034static SDValue PerformVMOVRRDCombine(SDNode *N,
7035                                     TargetLowering::DAGCombinerInfo &DCI) {
7036  // vmovrrd(vmovdrr x, y) -> x,y
7037  SDValue InDouble = N->getOperand(0);
7038  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
7039    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
7040
7041  // vmovrrd(load f64) -> (load i32), (load i32)
7042  SDNode *InNode = InDouble.getNode();
7043  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
7044      InNode->getValueType(0) == MVT::f64 &&
7045      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
7046      !cast<LoadSDNode>(InNode)->isVolatile()) {
7047    // TODO: Should this be done for non-FrameIndex operands?
7048    LoadSDNode *LD = cast<LoadSDNode>(InNode);
7049
7050    SelectionDAG &DAG = DCI.DAG;
7051    DebugLoc DL = LD->getDebugLoc();
7052    SDValue BasePtr = LD->getBasePtr();
7053    SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr,
7054                                 LD->getPointerInfo(), LD->isVolatile(),
7055                                 LD->isNonTemporal(), LD->getAlignment());
7056
7057    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
7058                                    DAG.getConstant(4, MVT::i32));
7059    SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
7060                                 LD->getPointerInfo(), LD->isVolatile(),
7061                                 LD->isNonTemporal(),
7062                                 std::min(4U, LD->getAlignment() / 2));
7063
7064    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
7065    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
7066    DCI.RemoveFromWorklist(LD);
7067    DAG.DeleteNode(LD);
7068    return Result;
7069  }
7070
7071  return SDValue();
7072}
7073
7074/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
7075/// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
7076static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
7077  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
7078  SDValue Op0 = N->getOperand(0);
7079  SDValue Op1 = N->getOperand(1);
7080  if (Op0.getOpcode() == ISD::BITCAST)
7081    Op0 = Op0.getOperand(0);
7082  if (Op1.getOpcode() == ISD::BITCAST)
7083    Op1 = Op1.getOperand(0);
7084  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
7085      Op0.getNode() == Op1.getNode() &&
7086      Op0.getResNo() == 0 && Op1.getResNo() == 1)
7087    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
7088                       N->getValueType(0), Op0.getOperand(0));
7089  return SDValue();
7090}
7091
7092/// PerformSTORECombine - Target-specific dag combine xforms for
7093/// ISD::STORE.
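/// Two illustrative rewrites performed below:
///   (store (VMOVDRR x, y), addr) -> (store x, addr), (store y, addr+4)
///   an i64 (extract_vector_elt v, n) store is bitcast to go through f64,
///   so the value is not split into a pair of i32 stores.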
7094static SDValue PerformSTORECombine(SDNode *N,
7095                                   TargetLowering::DAGCombinerInfo &DCI) {
7096  // Bitcast an i64 store extracted from a vector to f64.
7097  // Otherwise, the i64 value will be legalized to a pair of i32 values.
7098  StoreSDNode *St = cast<StoreSDNode>(N);
7099  SDValue StVal = St->getValue();
7100  if (!ISD::isNormalStore(St) || St->isVolatile())
7101    return SDValue();
7102
7103  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
7104      StVal.getNode()->hasOneUse()) {
7105    SelectionDAG  &DAG = DCI.DAG;
7106    DebugLoc DL = St->getDebugLoc();
7107    SDValue BasePtr = St->getBasePtr();
7108    SDValue NewST1 = DAG.getStore(St->getChain(), DL,
7109                                  StVal.getNode()->getOperand(0), BasePtr,
7110                                  St->getPointerInfo(), St->isVolatile(),
7111                                  St->isNonTemporal(), St->getAlignment());
7112
7113    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
7114                                    DAG.getConstant(4, MVT::i32));
7115    return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1),
7116                        OffsetPtr, St->getPointerInfo(), St->isVolatile(),
7117                        St->isNonTemporal(),
7118                        std::min(4U, St->getAlignment() / 2));
7119  }
7120
7121  if (StVal.getValueType() != MVT::i64 ||
7122      StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7123    return SDValue();
7124
7125  SelectionDAG &DAG = DCI.DAG;
7126  DebugLoc dl = StVal.getDebugLoc();
7127  SDValue IntVec = StVal.getOperand(0);
7128  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
7129                                 IntVec.getValueType().getVectorNumElements());
7130  SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
7131  SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
7132                               Vec, StVal.getOperand(1));
7133  dl = N->getDebugLoc();
7134  SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
7135  // Make the DAGCombiner fold the bitcasts.
7136  DCI.AddToWorklist(Vec.getNode());
7137  DCI.AddToWorklist(ExtElt.getNode());
7138  DCI.AddToWorklist(V.getNode());
7139  return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
7140                      St->getPointerInfo(), St->isVolatile(),
7141                      St->isNonTemporal(), St->getAlignment(),
7142                      St->getTBAAInfo());
7143}
7144
7145/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
7146/// are normal, non-volatile loads.  If so, it is profitable to bitcast an
7147/// i64 vector to have f64 elements, since the value can then be loaded
7148/// directly into a VFP register.
7149static bool hasNormalLoadOperand(SDNode *N) {
7150  unsigned NumElts = N->getValueType(0).getVectorNumElements();
7151  for (unsigned i = 0; i < NumElts; ++i) {
7152    SDNode *Elt = N->getOperand(i).getNode();
7153    if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
7154      return true;
7155  }
7156  return false;
7157}
7158
7159/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
7160/// ISD::BUILD_VECTOR.
7161static SDValue PerformBUILD_VECTORCombine(SDNode *N,
7162                                          TargetLowering::DAGCombinerInfo &DCI){
7163  // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
7164  // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
7165  // into a pair of GPRs, which is fine when the value is used as a scalar,
7166  // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
7167  SelectionDAG &DAG = DCI.DAG;
7168  if (N->getNumOperands() == 2) {
7169    SDValue RV = PerformVMOVDRRCombine(N, DAG);
7170    if (RV.getNode())
7171      return RV;
7172  }
7173
7174  // Load i64 elements as f64 values so that type legalization does not split
7175  // them up into i32 values.
7176  EVT VT = N->getValueType(0);
7177  if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
7178    return SDValue();
7179  DebugLoc dl = N->getDebugLoc();
7180  SmallVector<SDValue, 8> Ops;
7181  unsigned NumElts = VT.getVectorNumElements();
7182  for (unsigned i = 0; i < NumElts; ++i) {
7183    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
7184    Ops.push_back(V);
7185    // Make the DAGCombiner fold the bitcast.
7186    DCI.AddToWorklist(V.getNode());
7187  }
7188  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
7189  SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts);
7190  return DAG.getNode(ISD::BITCAST, dl, VT, BV);
7191}
7192
7193/// PerformInsertEltCombine - Target-specific dag combine xforms for
7194/// ISD::INSERT_VECTOR_ELT.
7195static SDValue PerformInsertEltCombine(SDNode *N,
7196                                       TargetLowering::DAGCombinerInfo &DCI) {
7197  // Bitcast an i64 load inserted into a vector to f64.
7198  // Otherwise, the i64 value will be legalized to a pair of i32 values.
7199  EVT VT = N->getValueType(0);
7200  SDNode *Elt = N->getOperand(1).getNode();
7201  if (VT.getVectorElementType() != MVT::i64 ||
7202      !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
7203    return SDValue();
7204
7205  SelectionDAG &DAG = DCI.DAG;
7206  DebugLoc dl = N->getDebugLoc();
7207  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
7208                                 VT.getVectorNumElements());
7209  SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
7210  SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
7211  // Make the DAGCombiner fold the bitcasts.
7212  DCI.AddToWorklist(Vec.getNode());
7213  DCI.AddToWorklist(V.getNode());
7214  SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
7215                               Vec, V, N->getOperand(2));
7216  return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
7217}
7218
7219/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
7220/// ISD::VECTOR_SHUFFLE.
7221static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
7222  // The LLVM shufflevector instruction does not require the shuffle mask
7223  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
7224  // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
7225  // operands do not match the mask length, they are extended by concatenating
7226  // them with undef vectors.  That is probably the right thing for other
7227  // targets, but for NEON it is better to concatenate two double-register
7228  // size vector operands into a single quad-register size vector.  Do that
7229  // transformation here:
7230  //   shuffle(concat(v1, undef), concat(v2, undef)) ->
7231  //   shuffle(concat(v1, v2), undef)
7232  SDValue Op0 = N->getOperand(0);
7233  SDValue Op1 = N->getOperand(1);
7234  if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
7235      Op1.getOpcode() != ISD::CONCAT_VECTORS ||
7236      Op0.getNumOperands() != 2 ||
7237      Op1.getNumOperands() != 2)
7238    return SDValue();
7239  SDValue Concat0Op1 = Op0.getOperand(1);
7240  SDValue Concat1Op1 = Op1.getOperand(1);
7241  if (Concat0Op1.getOpcode() != ISD::UNDEF ||
7242      Concat1Op1.getOpcode() != ISD::UNDEF)
7243    return SDValue();
7244  // Skip the transformation if any of the types are illegal.
7245  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7246  EVT VT = N->getValueType(0);
7247  if (!TLI.isTypeLegal(VT) ||
7248      !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
7249      !TLI.isTypeLegal(Concat1Op1.getValueType()))
7250    return SDValue();
7251
7252  SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
7253                                  Op0.getOperand(0), Op1.getOperand(0));
7254  // Translate the shuffle mask.
7255  SmallVector<int, 16> NewMask;
7256  unsigned NumElts = VT.getVectorNumElements();
7257  unsigned HalfElts = NumElts/2;
7258  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
7259  for (unsigned n = 0; n < NumElts; ++n) {
7260    int MaskElt = SVN->getMaskElt(n);
7261    int NewElt = -1;
7262    if (MaskElt < (int)HalfElts)
7263      NewElt = MaskElt;
7264    else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
7265      NewElt = HalfElts + MaskElt - NumElts;
7266    NewMask.push_back(NewElt);
7267  }
7268  return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
7269                              DAG.getUNDEF(VT), NewMask.data());
7270}
7271
7272/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and
7273/// NEON load/store intrinsics to merge base address updates.
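/// A sketch of the rewrite, with a constant increment that matches the
/// memory access size (8 bytes for a single d register):
///   (int_arm_neon_vld1 addr), (add addr, 8) -> (VLD1_UPD addr)
/// where VLD1_UPD also produces the incremented address, giving the
/// post-indexed form "vld1 {d16}, [rN]!".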
7274static SDValue CombineBaseUpdate(SDNode *N,
7275                                 TargetLowering::DAGCombinerInfo &DCI) {
7276  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
7277    return SDValue();
7278
7279  SelectionDAG &DAG = DCI.DAG;
7280  bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
7281                      N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
7282  unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
7283  SDValue Addr = N->getOperand(AddrOpIdx);
7284
7285  // Search for a use of the address operand that is an increment.
7286  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
7287         UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
7288    SDNode *User = *UI;
7289    if (User->getOpcode() != ISD::ADD ||
7290        UI.getUse().getResNo() != Addr.getResNo())
7291      continue;
7292
7293    // Check that the add is independent of the load/store.  Otherwise, folding
7294    // it would create a cycle.
7295    if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
7296      continue;
7297
7298    // Find the new opcode for the updating load/store.
7299    bool isLoad = true;
7300    bool isLaneOp = false;
7301    unsigned NewOpc = 0;
7302    unsigned NumVecs = 0;
7303    if (isIntrinsic) {
7304      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
7305      switch (IntNo) {
7306      default: llvm_unreachable("unexpected intrinsic for Neon base update");
7307      case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
7308        NumVecs = 1; break;
7309      case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
7310        NumVecs = 2; break;
7311      case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
7312        NumVecs = 3; break;
7313      case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
7314        NumVecs = 4; break;
7315      case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
7316        NumVecs = 2; isLaneOp = true; break;
7317      case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
7318        NumVecs = 3; isLaneOp = true; break;
7319      case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
7320        NumVecs = 4; isLaneOp = true; break;
7321      case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
7322        NumVecs = 1; isLoad = false; break;
7323      case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
7324        NumVecs = 2; isLoad = false; break;
7325      case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
7326        NumVecs = 3; isLoad = false; break;
7327      case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
7328        NumVecs = 4; isLoad = false; break;
7329      case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
7330        NumVecs = 2; isLoad = false; isLaneOp = true; break;
7331      case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
7332        NumVecs = 3; isLoad = false; isLaneOp = true; break;
7333      case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
7334        NumVecs = 4; isLoad = false; isLaneOp = true; break;
7335      }
7336    } else {
7337      isLaneOp = true;
7338      switch (N->getOpcode()) {
7339      default: llvm_unreachable("unexpected opcode for Neon base update");
7340      case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
7341      case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
7342      case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
7343      }
7344    }
7345
7346    // Find the size of memory referenced by the load/store.
7347    EVT VecTy;
7348    if (isLoad)
7349      VecTy = N->getValueType(0);
7350    else
7351      VecTy = N->getOperand(AddrOpIdx+1).getValueType();
7352    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
7353    if (isLaneOp)
7354      NumBytes /= VecTy.getVectorNumElements();
7355
7356    // If the increment is a constant, it must match the memory ref size.
7357    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
7358    if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
7359      uint64_t IncVal = CInc->getZExtValue();
7360      if (IncVal != NumBytes)
7361        continue;
7362    } else if (NumBytes >= 3 * 16) {
7363      // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
7364      // separate instructions that make it harder to use a non-constant update.
7365      continue;
7366    }
7367
7368    // Create the new updating load/store node.
7369    EVT Tys[6];
7370    unsigned NumResultVecs = (isLoad ? NumVecs : 0);
7371    unsigned n;
7372    for (n = 0; n < NumResultVecs; ++n)
7373      Tys[n] = VecTy;
7374    Tys[n++] = MVT::i32;
7375    Tys[n] = MVT::Other;
7376    SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2);
7377    SmallVector<SDValue, 8> Ops;
7378    Ops.push_back(N->getOperand(0)); // incoming chain
7379    Ops.push_back(N->getOperand(AddrOpIdx));
7380    Ops.push_back(Inc);
7381    for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
7382      Ops.push_back(N->getOperand(i));
7383    }
7384    MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
7385    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys,
7386                                           Ops.data(), Ops.size(),
7387                                           MemInt->getMemoryVT(),
7388                                           MemInt->getMemOperand());
7389
7390    // Update the uses.
7391    std::vector<SDValue> NewResults;
7392    for (unsigned i = 0; i < NumResultVecs; ++i) {
7393      NewResults.push_back(SDValue(UpdN.getNode(), i));
7394    }
7395    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
7396    DCI.CombineTo(N, NewResults);
7397    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
7398
7399    break;
7400  }
7401  return SDValue();
7402}
7403
7404/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
7405/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
7406/// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
7407/// return true.
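/// Roughly, when every use of the vldN-lane's vector results duplicates the
/// loaded lane:
///   (vduplane (vld2lane addr, ..., lane):0),
///   (vduplane (vld2lane addr, ..., lane):1) -> (VLD2DUP addr)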
7408static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
7409  SelectionDAG &DAG = DCI.DAG;
7410  EVT VT = N->getValueType(0);
7411  // vldN-dup instructions only support 64-bit vectors for N > 1.
7412  if (!VT.is64BitVector())
7413    return false;
7414
7415  // Check if the VDUPLANE operand is a vldN-dup intrinsic.
7416  SDNode *VLD = N->getOperand(0).getNode();
7417  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
7418    return false;
7419  unsigned NumVecs = 0;
7420  unsigned NewOpc = 0;
7421  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
7422  if (IntNo == Intrinsic::arm_neon_vld2lane) {
7423    NumVecs = 2;
7424    NewOpc = ARMISD::VLD2DUP;
7425  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
7426    NumVecs = 3;
7427    NewOpc = ARMISD::VLD3DUP;
7428  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
7429    NumVecs = 4;
7430    NewOpc = ARMISD::VLD4DUP;
7431  } else {
7432    return false;
7433  }
7434
7435  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
7436  // numbers match the load.
7437  unsigned VLDLaneNo =
7438    cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
7439  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
7440       UI != UE; ++UI) {
7441    // Ignore uses of the chain result.
7442    if (UI.getUse().getResNo() == NumVecs)
7443      continue;
7444    SDNode *User = *UI;
7445    if (User->getOpcode() != ARMISD::VDUPLANE ||
7446        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
7447      return false;
7448  }
7449
7450  // Create the vldN-dup node.
7451  EVT Tys[5];
7452  unsigned n;
7453  for (n = 0; n < NumVecs; ++n)
7454    Tys[n] = VT;
7455  Tys[n] = MVT::Other;
7456  SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1);
7457  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
7458  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
7459  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys,
7460                                           Ops, 2, VLDMemInt->getMemoryVT(),
7461                                           VLDMemInt->getMemOperand());
7462
7463  // Update the uses.
7464  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
7465       UI != UE; ++UI) {
7466    unsigned ResNo = UI.getUse().getResNo();
7467    // Ignore uses of the chain result.
7468    if (ResNo == NumVecs)
7469      continue;
7470    SDNode *User = *UI;
7471    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
7472  }
7473
7474  // Now the vldN-lane intrinsic is dead except for its chain result.
7475  // Update uses of the chain.
7476  std::vector<SDValue> VLDDupResults;
7477  for (unsigned n = 0; n < NumVecs; ++n)
7478    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
7479  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
7480  DCI.CombineTo(VLD, VLDDupResults);
7481
7482  return true;
7483}
7484
7485/// PerformVDUPLANECombine - Target-specific dag combine xforms for
7486/// ARMISD::VDUPLANE.
7487static SDValue PerformVDUPLANECombine(SDNode *N,
7488                                      TargetLowering::DAGCombinerInfo &DCI) {
7489  SDValue Op = N->getOperand(0);
7490
7491  // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
7492  // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
7493  if (CombineVLDDUP(N, DCI))
7494    return SDValue(N, 0);
7495
7496  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
7497  // redundant.  Ignore bit_converts for now; element sizes are checked below.
7498  while (Op.getOpcode() == ISD::BITCAST)
7499    Op = Op.getOperand(0);
7500  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
7501    return SDValue();
7502
7503  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
7504  unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
7505  // The canonical VMOV for a zero vector uses a 32-bit element size.
7506  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7507  unsigned EltBits;
7508  if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
7509    EltSize = 8;
7510  EVT VT = N->getValueType(0);
7511  if (EltSize > VT.getVectorElementType().getSizeInBits())
7512    return SDValue();
7513
7514  return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
7515}
7516
7517// isConstVecPow2 - Return true if each vector element is a power of 2, all
7518// elements are the same constant, C, and Log2(C) ranges from 1 to 32.
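// For example, <float 8.000000e+00, float 8.000000e+00> sets C to 8
// (so Log2_64(C) == 3, as used by the VCVT combines below).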
7519static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C)
7520{
7521  integerPart cN;
7522  integerPart c0 = 0;
7523  for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements();
7524       I != E; I++) {
7525    ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I));
7526    if (!C)
7527      return false;
7528
7529    bool isExact;
7530    APFloat APF = C->getValueAPF();
7531    if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact)
7532        != APFloat::opOK || !isExact)
7533      return false;
7534
7535    c0 = (I == 0) ? cN : c0;
7536    if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32)
7537      return false;
7538  }
7539  C = c0;
7540  return true;
7541}
7542
7543/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
7544/// can replace combinations of VMUL and VCVT (floating-point to integer)
7545/// when the VMUL has a constant operand that is a power of 2.
7546///
7547/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
7548///  vmul.f32        d16, d17, d16
7549///  vcvt.s32.f32    d16, d16
7550/// becomes:
7551///  vcvt.s32.f32    d16, d16, #3
7552static SDValue PerformVCVTCombine(SDNode *N,
7553                                  TargetLowering::DAGCombinerInfo &DCI,
7554                                  const ARMSubtarget *Subtarget) {
7555  SelectionDAG &DAG = DCI.DAG;
7556  SDValue Op = N->getOperand(0);
7557
7558  if (!Subtarget->hasNEON() || !Op.getValueType().isVector() ||
7559      Op.getOpcode() != ISD::FMUL)
7560    return SDValue();
7561
7562  uint64_t C;
7563  SDValue N0 = Op->getOperand(0);
7564  SDValue ConstVec = Op->getOperand(1);
7565  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
7566
7567  if (ConstVec.getOpcode() != ISD::BUILD_VECTOR ||
7568      !isConstVecPow2(ConstVec, isSigned, C))
7569    return SDValue();
7570
7571  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
7572    Intrinsic::arm_neon_vcvtfp2fxu;
7573  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
7574                     N->getValueType(0),
7575                     DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
7576                     DAG.getConstant(Log2_64(C), MVT::i32));
7577}
7578
7579/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
7580/// can replace combinations of VCVT (integer to floating-point) and VDIV
7581/// when the VDIV has a constant operand that is a power of 2.
7582///
7583/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
7584///  vcvt.f32.s32    d16, d16
7585///  vdiv.f32        d16, d17, d16
7586/// becomes:
7587///  vcvt.f32.s32    d16, d16, #3
7588static SDValue PerformVDIVCombine(SDNode *N,
7589                                  TargetLowering::DAGCombinerInfo &DCI,
7590                                  const ARMSubtarget *Subtarget) {
7591  SelectionDAG &DAG = DCI.DAG;
7592  SDValue Op = N->getOperand(0);
7593  unsigned OpOpcode = Op.getNode()->getOpcode();
7594
7595  if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() ||
7596      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
7597    return SDValue();
7598
7599  uint64_t C;
7600  SDValue ConstVec = N->getOperand(1);
7601  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
7602
7603  if (ConstVec.getOpcode() != ISD::BUILD_VECTOR ||
7604      !isConstVecPow2(ConstVec, isSigned, C))
7605    return SDValue();
7606
7607  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
7608    Intrinsic::arm_neon_vcvtfxu2fp;
7609  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
7610                     Op.getValueType(),
7611                     DAG.getConstant(IntrinsicOpcode, MVT::i32),
7612                     Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32));
7613}
7614
7615/// getVShiftImm - Check if this is a valid build_vector for the immediate
7616/// operand of a vector shift operation, where all the elements of the
7617/// build_vector must have the same constant integer value.
7618static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
7619  // Ignore bit_converts.
7620  while (Op.getOpcode() == ISD::BITCAST)
7621    Op = Op.getOperand(0);
7622  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
7623  APInt SplatBits, SplatUndef;
7624  unsigned SplatBitSize;
7625  bool HasAnyUndefs;
7626  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
7627                                    HasAnyUndefs, ElementBits) ||
7628      SplatBitSize > ElementBits)
7629    return false;
7630  Cnt = SplatBits.getSExtValue();
7631  return true;
7632}
7633
7634/// isVShiftLImm - Check if this is a valid build_vector for the immediate
7635/// operand of a vector shift left operation.  That value must be in the range:
7636///   0 <= Value < ElementBits for a left shift; or
7637///   0 <= Value <= ElementBits for a long left shift.
7638static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
7639  assert(VT.isVector() && "vector shift count is not a vector type");
7640  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
7641  if (!getVShiftImm(Op, ElementBits, Cnt))
7642    return false;
7643  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
7644}
7645
7646/// isVShiftRImm - Check if this is a valid build_vector for the immediate
7647/// operand of a vector shift right operation.  For a shift opcode, the value
7648/// is positive, but for an intrinsic the value must be negative. The
7649/// absolute value must be in the range:
7650///   1 <= |Value| <= ElementBits for a right shift; or
7651///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
7652static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
7653                         int64_t &Cnt) {
7654  assert(VT.isVector() && "vector shift count is not a vector type");
7655  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
7656  if (!getVShiftImm(Op, ElementBits, Cnt))
7657    return false;
7658  if (isIntrinsic)
7659    Cnt = -Cnt;
7660  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
7661}
7662
7663/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
7664static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
7665  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7666  switch (IntNo) {
7667  default:
7668    // Don't do anything for most intrinsics.
7669    break;
7670
7671  // Vector shifts: check for immediate versions and lower them.
7672  // Note: This is done during DAG combining instead of DAG legalizing because
7673  // the build_vectors for 64-bit vector element shift counts are generally
7674  // not legal, and it is hard to see their values after they get legalized to
7675  // loads from a constant pool.
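  // For example, with <4 x i32> operands (a sketch):
  //   (int_arm_neon_vshiftu x, (build_vector 3, 3, 3, 3)) -> (VSHL x, 3)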
7676  case Intrinsic::arm_neon_vshifts:
7677  case Intrinsic::arm_neon_vshiftu:
7678  case Intrinsic::arm_neon_vshiftls:
7679  case Intrinsic::arm_neon_vshiftlu:
7680  case Intrinsic::arm_neon_vshiftn:
7681  case Intrinsic::arm_neon_vrshifts:
7682  case Intrinsic::arm_neon_vrshiftu:
7683  case Intrinsic::arm_neon_vrshiftn:
7684  case Intrinsic::arm_neon_vqshifts:
7685  case Intrinsic::arm_neon_vqshiftu:
7686  case Intrinsic::arm_neon_vqshiftsu:
7687  case Intrinsic::arm_neon_vqshiftns:
7688  case Intrinsic::arm_neon_vqshiftnu:
7689  case Intrinsic::arm_neon_vqshiftnsu:
7690  case Intrinsic::arm_neon_vqrshiftns:
7691  case Intrinsic::arm_neon_vqrshiftnu:
7692  case Intrinsic::arm_neon_vqrshiftnsu: {
7693    EVT VT = N->getOperand(1).getValueType();
7694    int64_t Cnt;
7695    unsigned VShiftOpc = 0;
7696
7697    switch (IntNo) {
7698    case Intrinsic::arm_neon_vshifts:
7699    case Intrinsic::arm_neon_vshiftu:
7700      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
7701        VShiftOpc = ARMISD::VSHL;
7702        break;
7703      }
7704      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
7705        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
7706                     ARMISD::VSHRs : ARMISD::VSHRu);
7707        break;
7708      }
7709      return SDValue();
7710
7711    case Intrinsic::arm_neon_vshiftls:
7712    case Intrinsic::arm_neon_vshiftlu:
7713      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
7714        break;
7715      llvm_unreachable("invalid shift count for vshll intrinsic");
7716
7717    case Intrinsic::arm_neon_vrshifts:
7718    case Intrinsic::arm_neon_vrshiftu:
7719      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
7720        break;
7721      return SDValue();
7722
7723    case Intrinsic::arm_neon_vqshifts:
7724    case Intrinsic::arm_neon_vqshiftu:
7725      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
7726        break;
7727      return SDValue();
7728
7729    case Intrinsic::arm_neon_vqshiftsu:
7730      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
7731        break;
7732      llvm_unreachable("invalid shift count for vqshlu intrinsic");
7733
7734    case Intrinsic::arm_neon_vshiftn:
7735    case Intrinsic::arm_neon_vrshiftn:
7736    case Intrinsic::arm_neon_vqshiftns:
7737    case Intrinsic::arm_neon_vqshiftnu:
7738    case Intrinsic::arm_neon_vqshiftnsu:
7739    case Intrinsic::arm_neon_vqrshiftns:
7740    case Intrinsic::arm_neon_vqrshiftnu:
7741    case Intrinsic::arm_neon_vqrshiftnsu:
7742      // Narrowing shifts require an immediate right shift.
7743      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
7744        break;
7745      llvm_unreachable("invalid shift count for narrowing vector shift "
7746                       "intrinsic");
7747
7748    default:
7749      llvm_unreachable("unhandled vector shift");
7750    }
7751
7752    switch (IntNo) {
7753    case Intrinsic::arm_neon_vshifts:
7754    case Intrinsic::arm_neon_vshiftu:
7755      // Opcode already set above.
7756      break;
7757    case Intrinsic::arm_neon_vshiftls:
7758    case Intrinsic::arm_neon_vshiftlu:
7759      if (Cnt == VT.getVectorElementType().getSizeInBits())
7760        VShiftOpc = ARMISD::VSHLLi;
7761      else
7762        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
7763                     ARMISD::VSHLLs : ARMISD::VSHLLu);
7764      break;
7765    case Intrinsic::arm_neon_vshiftn:
7766      VShiftOpc = ARMISD::VSHRN; break;
7767    case Intrinsic::arm_neon_vrshifts:
7768      VShiftOpc = ARMISD::VRSHRs; break;
7769    case Intrinsic::arm_neon_vrshiftu:
7770      VShiftOpc = ARMISD::VRSHRu; break;
7771    case Intrinsic::arm_neon_vrshiftn:
7772      VShiftOpc = ARMISD::VRSHRN; break;
7773    case Intrinsic::arm_neon_vqshifts:
7774      VShiftOpc = ARMISD::VQSHLs; break;
7775    case Intrinsic::arm_neon_vqshiftu:
7776      VShiftOpc = ARMISD::VQSHLu; break;
7777    case Intrinsic::arm_neon_vqshiftsu:
7778      VShiftOpc = ARMISD::VQSHLsu; break;
7779    case Intrinsic::arm_neon_vqshiftns:
7780      VShiftOpc = ARMISD::VQSHRNs; break;
7781    case Intrinsic::arm_neon_vqshiftnu:
7782      VShiftOpc = ARMISD::VQSHRNu; break;
7783    case Intrinsic::arm_neon_vqshiftnsu:
7784      VShiftOpc = ARMISD::VQSHRNsu; break;
7785    case Intrinsic::arm_neon_vqrshiftns:
7786      VShiftOpc = ARMISD::VQRSHRNs; break;
7787    case Intrinsic::arm_neon_vqrshiftnu:
7788      VShiftOpc = ARMISD::VQRSHRNu; break;
7789    case Intrinsic::arm_neon_vqrshiftnsu:
7790      VShiftOpc = ARMISD::VQRSHRNsu; break;
7791    }
7792
7793    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
7794                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
7795  }
7796
7797  case Intrinsic::arm_neon_vshiftins: {
7798    EVT VT = N->getOperand(1).getValueType();
7799    int64_t Cnt;
7800    unsigned VShiftOpc = 0;
7801
7802    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
7803      VShiftOpc = ARMISD::VSLI;
7804    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
7805      VShiftOpc = ARMISD::VSRI;
7806    else {
7807      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
7808    }
7809
7810    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
7811                       N->getOperand(1), N->getOperand(2),
7812                       DAG.getConstant(Cnt, MVT::i32));
7813  }
7814
7815  case Intrinsic::arm_neon_vqrshifts:
7816  case Intrinsic::arm_neon_vqrshiftu:
7817    // No immediate versions of these to check for.
7818    break;
7819  }
7820
7821  return SDValue();
7822}
7823
7824/// PerformShiftCombine - Checks for immediate versions of vector shifts and
7825/// lowers them.  As with the vector shift intrinsics, this is done during DAG
7826/// combining instead of DAG legalizing because the build_vectors for 64-bit
7827/// vector element shift counts are generally not legal, and it is hard to see
7828/// their values after they get legalized to loads from a constant pool.
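/// For example (illustrative): a "shl <4 x i16> %x, <i16 3, i16 3, i16 3,
/// i16 3>" whose shift amount is a splatted constant becomes an ARMISD::VSHL
/// node with an i32 shift count of 3, which selects to "vshl.i16 d0, d0, #3".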
7829static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
7830                                   const ARMSubtarget *ST) {
7831  EVT VT = N->getValueType(0);
7832
7833  // Nothing to be done for scalar shifts.
7834  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7835  if (!VT.isVector() || !TLI.isTypeLegal(VT))
7836    return SDValue();
7837
7838  assert(ST->hasNEON() && "unexpected vector shift");
7839  int64_t Cnt;
7840
7841  switch (N->getOpcode()) {
7842  default: llvm_unreachable("unexpected shift opcode");
7843
7844  case ISD::SHL:
7845    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
7846      return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
7847                         DAG.getConstant(Cnt, MVT::i32));
7848    break;
7849
7850  case ISD::SRA:
7851  case ISD::SRL:
7852    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
7853      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
7854                            ARMISD::VSHRs : ARMISD::VSHRu);
7855      return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
7856                         DAG.getConstant(Cnt, MVT::i32));
7857    }
7858  }
7859  return SDValue();
7860}
7861
7862/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
7863/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
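/// For example (illustrative): "sext(extract_vector_elt <8 x i16> %v, 3)" to
/// i32 becomes an ARMISD::VGETLANEs node, which selects to the lane move
/// "vmov.s16 r0, d0[3]" instead of an extract plus a separate sign extend.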
7864static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
7865                                    const ARMSubtarget *ST) {
7866  SDValue N0 = N->getOperand(0);
7867
7868  // Check for sign- and zero-extensions of vector extract operations of 8-
7869  // and 16-bit vector elements.  NEON supports these directly.  They are
7870  // handled during DAG combining because type legalization will promote them
7871  // to 32-bit types and it is messy to recognize the operations after that.
7872  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
7873    SDValue Vec = N0.getOperand(0);
7874    SDValue Lane = N0.getOperand(1);
7875    EVT VT = N->getValueType(0);
7876    EVT EltVT = N0.getValueType();
7877    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7878
7879    if (VT == MVT::i32 &&
7880        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
7881        TLI.isTypeLegal(Vec.getValueType()) &&
7882        isa<ConstantSDNode>(Lane)) {
7883
7884      unsigned Opc = 0;
7885      switch (N->getOpcode()) {
7886      default: llvm_unreachable("unexpected opcode");
7887      case ISD::SIGN_EXTEND:
7888        Opc = ARMISD::VGETLANEs;
7889        break;
7890      case ISD::ZERO_EXTEND:
7891      case ISD::ANY_EXTEND:
7892        Opc = ARMISD::VGETLANEu;
7893        break;
7894      }
7895      return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
7896    }
7897  }
7898
7899  return SDValue();
7900}
7901
7902/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
7903/// to match f32 max/min patterns to use NEON vmax/vmin instructions.
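/// For example (illustrative): "(a < b) ? a : b" on f32, with both operands
/// known not to be NaN, becomes ARMISD::FMIN and selects to a NEON vmin.f32;
/// the non-strict forms must additionally worry about +0.0 vs. -0.0.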
7904static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
7905                                       const ARMSubtarget *ST) {
7906  // If the target supports NEON, try to use vmax/vmin instructions for f32
7907  // selects like "x < y ? x : y".  Unless the NoNaNsFPMath option is set,
7908  // be careful about NaNs:  NEON's vmax/vmin return NaN if either operand is
7909  // a NaN; only do the transformation when it matches that behavior.
7910
7911  // For now only do this when using NEON for FP operations; if using VFP, it
7912  // is not obvious that the benefit outweighs the cost of switching to the
7913  // NEON pipeline.
7914  if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
7915      N->getValueType(0) != MVT::f32)
7916    return SDValue();
7917
7918  SDValue CondLHS = N->getOperand(0);
7919  SDValue CondRHS = N->getOperand(1);
7920  SDValue LHS = N->getOperand(2);
7921  SDValue RHS = N->getOperand(3);
7922  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
7923
7924  unsigned Opcode = 0;
7925  bool IsReversed;
7926  if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
7927    IsReversed = false; // x CC y ? x : y
7928  } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
7929    IsReversed = true;  // x CC y ? y : x
7930  } else {
7931    return SDValue();
7932  }
7933
7934  bool IsUnordered;
7935  switch (CC) {
7936  default: break;
7937  case ISD::SETOLT:
7938  case ISD::SETOLE:
7939  case ISD::SETLT:
7940  case ISD::SETLE:
7941  case ISD::SETULT:
7942  case ISD::SETULE:
7943    // If LHS is NaN, an ordered comparison will be false and the result will
7944    // be the RHS, but vmin(NaN, RHS) = NaN.  Avoid this by checking that LHS
7945    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
7946    IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
7947    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
7948      break;
7949    // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
7950    // will return -0, so vmin can only be used for unsafe math or if one of
7951    // the operands is known to be nonzero.
7952    if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
7953        !UnsafeFPMath &&
7954        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
7955      break;
7956    Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
7957    break;
7958
7959  case ISD::SETOGT:
7960  case ISD::SETOGE:
7961  case ISD::SETGT:
7962  case ISD::SETGE:
7963  case ISD::SETUGT:
7964  case ISD::SETUGE:
7965    // If LHS is NaN, an ordered comparison will be false and the result will
7966    // be the RHS, but vmax(NaN, RHS) = NaN.  Avoid this by checking that LHS
7967    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
7968    IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
7969    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
7970      break;
7971    // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
7972    // will return +0, so vmax can only be used for unsafe math or if one of
7973    // the operands is known to be nonzero.
7974    if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
7975        !UnsafeFPMath &&
7976        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
7977      break;
7978    Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
7979    break;
7980  }
7981
7982  if (!Opcode)
7983    return SDValue();
7984  return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
7985}
7986
7987/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
7988SDValue
7989ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
7990  SDValue Cmp = N->getOperand(4);
7991  if (Cmp.getOpcode() != ARMISD::CMPZ)
7992    // Only looking at EQ and NE cases.
7993    return SDValue();
7994
7995  EVT VT = N->getValueType(0);
7996  DebugLoc dl = N->getDebugLoc();
7997  SDValue LHS = Cmp.getOperand(0);
7998  SDValue RHS = Cmp.getOperand(1);
7999  SDValue FalseVal = N->getOperand(0);
8000  SDValue TrueVal = N->getOperand(1);
8001  SDValue ARMcc = N->getOperand(2);
8002  ARMCC::CondCodes CC =
8003    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
8004
8005  // Simplify
8006  //   mov     r1, r0
8007  //   cmp     r1, x
8008  //   mov     r0, y
8009  //   moveq   r0, x
8010  // to
8011  //   cmp     r0, x
8012  //   movne   r0, y
8013  //
8014  //   mov     r1, r0
8015  //   cmp     r1, x
8016  //   mov     r0, x
8017  //   movne   r0, y
8018  // to
8019  //   cmp     r0, x
8020  //   movne   r0, y
8021  // FIXME: Turn this into a target-neutral optimization?
8022  SDValue Res;
8023  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
8024    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
8025                      N->getOperand(3), Cmp);
8026  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
8027    SDValue ARMcc;
8028    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
8029    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
8030                      N->getOperand(3), NewCmp);
8031  }
8032
8033  if (Res.getNode()) {
8034    APInt KnownZero, KnownOne;
8035    APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
8036    DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne);
8037    // Capture demanded bits information that would otherwise be lost.
8038    if (KnownZero == 0xfffffffe)
8039      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
8040                        DAG.getValueType(MVT::i1));
8041    else if (KnownZero == 0xffffff00)
8042      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
8043                        DAG.getValueType(MVT::i8));
8044    else if (KnownZero == 0xffff0000)
8045      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
8046                        DAG.getValueType(MVT::i16));
8047  }
8048
8049  return Res;
8050}
8051
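/// PerformDAGCombine - Dispatch to the target-specific combines above based
/// on the opcode of the node being combined.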
8052SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
8053                                             DAGCombinerInfo &DCI) const {
8054  switch (N->getOpcode()) {
8055  default: break;
8056  case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
8057  case ISD::SUB:        return PerformSUBCombine(N, DCI);
8058  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
8059  case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
8060  case ISD::AND:        return PerformANDCombine(N, DCI);
8061  case ARMISD::BFI:     return PerformBFICombine(N, DCI);
8062  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
8063  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
8064  case ISD::STORE:      return PerformSTORECombine(N, DCI);
8065  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI);
8066  case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
8067  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
8068  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
8069  case ISD::FP_TO_SINT:
8070  case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget);
8071  case ISD::FDIV:       return PerformVDIVCombine(N, DCI, Subtarget);
8072  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
8073  case ISD::SHL:
8074  case ISD::SRA:
8075  case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
8076  case ISD::SIGN_EXTEND:
8077  case ISD::ZERO_EXTEND:
8078  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
8079  case ISD::SELECT_CC:  return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
8080  case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
8081  case ARMISD::VLD2DUP:
8082  case ARMISD::VLD3DUP:
8083  case ARMISD::VLD4DUP:
8084    return CombineBaseUpdate(N, DCI);
8085  case ISD::INTRINSIC_VOID:
8086  case ISD::INTRINSIC_W_CHAIN:
8087    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
8088    case Intrinsic::arm_neon_vld1:
8089    case Intrinsic::arm_neon_vld2:
8090    case Intrinsic::arm_neon_vld3:
8091    case Intrinsic::arm_neon_vld4:
8092    case Intrinsic::arm_neon_vld2lane:
8093    case Intrinsic::arm_neon_vld3lane:
8094    case Intrinsic::arm_neon_vld4lane:
8095    case Intrinsic::arm_neon_vst1:
8096    case Intrinsic::arm_neon_vst2:
8097    case Intrinsic::arm_neon_vst3:
8098    case Intrinsic::arm_neon_vst4:
8099    case Intrinsic::arm_neon_vst2lane:
8100    case Intrinsic::arm_neon_vst3lane:
8101    case Intrinsic::arm_neon_vst4lane:
8102      return CombineBaseUpdate(N, DCI);
8103    default: break;
8104    }
8105    break;
8106  }
8107  return SDValue();
8108}
8109
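/// isDesirableToTransformToIntegerOp - f32 loads and stores may profitably
/// be performed as integer operations, which can avoid routing the value
/// through the VFP register file when it is only being copied.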
8110bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
8111                                                          EVT VT) const {
8112  return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
8113}
8114
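/// allowsUnalignedMemoryAccesses - Unaligned accesses are supported only for
/// i8, i16 and i32, and only when the subtarget permits unaligned memory
/// accesses at all.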
8115bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
8116  if (!Subtarget->allowsUnalignedMem())
8117    return false;
8118
8119  switch (VT.getSimpleVT().SimpleTy) {
8120  default:
8121    return false;
8122  case MVT::i8:
8123  case MVT::i16:
8124  case MVT::i32:
8125    return true;
8126  // FIXME: VLD1 etc. with standard alignment is legal.
8127  }
8128}
8129
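/// isLegalT1AddressImmediate - Return true if V is a legal Thumb1 load/store
/// offset for the given type: an unsigned 5-bit immediate scaled by the
/// access size, e.g. 0, 4, ..., 124 for i32.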
8130static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
8131  if (V < 0)
8132    return false;
8133
8134  unsigned Scale = 1;
8135  switch (VT.getSimpleVT().SimpleTy) {
8136  default: return false;
8137  case MVT::i1:
8138  case MVT::i8:
8139    // Scale == 1;
8140    break;
8141  case MVT::i16:
8142    // Scale == 2;
8143    Scale = 2;
8144    break;
8145  case MVT::i32:
8146    // Scale == 4;
8147    Scale = 4;
8148    break;
8149  }
8150
8151  if ((V & (Scale - 1)) != 0)
8152    return false;
8153  V /= Scale;
8154  return V == (V & ((1LL << 5) - 1));
8155}
8156
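/// isLegalT2AddressImmediate - Return true if V is a legal Thumb2 load/store
/// offset for the given type: +imm12 or -imm8 for integer accesses, or an
/// 8-bit immediate scaled by 4 for VFP loads and stores.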
8157static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
8158                                      const ARMSubtarget *Subtarget) {
8159  bool isNeg = false;
8160  if (V < 0) {
8161    isNeg = true;
8162    V = -V;
8163  }
8164
8165  switch (VT.getSimpleVT().SimpleTy) {
8166  default: return false;
8167  case MVT::i1:
8168  case MVT::i8:
8169  case MVT::i16:
8170  case MVT::i32:
8171    // + imm12 or - imm8
8172    if (isNeg)
8173      return V == (V & ((1LL << 8) - 1));
8174    return V == (V & ((1LL << 12) - 1));
8175  case MVT::f32:
8176  case MVT::f64:
8177    // Same as ARM mode. FIXME: NEON?
8178    if (!Subtarget->hasVFP2())
8179      return false;
8180    if ((V & 3) != 0)
8181      return false;
8182    V >>= 2;
8183    return V == (V & ((1LL << 8) - 1));
8184  }
8185}
8186
8187/// isLegalAddressImmediate - Return true if the integer value can be used
8188/// as the offset of the target addressing mode for load / store of the
8189/// given type.
8190static bool isLegalAddressImmediate(int64_t V, EVT VT,
8191                                    const ARMSubtarget *Subtarget) {
8192  if (V == 0)
8193    return true;
8194
8195  if (!VT.isSimple())
8196    return false;
8197
8198  if (Subtarget->isThumb1Only())
8199    return isLegalT1AddressImmediate(V, VT);
8200  else if (Subtarget->isThumb2())
8201    return isLegalT2AddressImmediate(V, VT, Subtarget);
8202
8203  // ARM mode.
8204  if (V < 0)
8205    V = -V;
8206  switch (VT.getSimpleVT().SimpleTy) {
8207  default: return false;
8208  case MVT::i1:
8209  case MVT::i8:
8210  case MVT::i32:
8211    // +- imm12
8212    return V == (V & ((1LL << 12) - 1));
8213  case MVT::i16:
8214    // +- imm8
8215    return V == (V & ((1LL << 8) - 1));
8216  case MVT::f32:
8217  case MVT::f64:
8218    if (!Subtarget->hasVFP2()) // FIXME: NEON?
8219      return false;
8220    if ((V & 3) != 0)
8221      return false;
8222    V >>= 2;
8223    return V == (V & ((1LL << 8) - 1));
8224  }
8225}
8226
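/// isLegalT2ScaledAddressingMode - Return true if a Thumb2 load/store of the
/// given type can use the scaled-register addressing mode described by AM
/// (reg + reg << imm).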
8227bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
8228                                                      EVT VT) const {
8229  int Scale = AM.Scale;
8230  if (Scale < 0)
8231    return false;
8232
8233  switch (VT.getSimpleVT().SimpleTy) {
8234  default: return false;
8235  case MVT::i1:
8236  case MVT::i8:
8237  case MVT::i16:
8238  case MVT::i32:
8239    if (Scale == 1)
8240      return true;
8241    // r + r << imm
8242    Scale = Scale & ~1;
8243    return Scale == 2 || Scale == 4 || Scale == 8;
8244  case MVT::i64:
8245    // r + r
8246    if (((unsigned)AM.HasBaseReg + Scale) <= 2)
8247      return true;
8248    return false;
8249  case MVT::isVoid:
8250    // Note, we allow "void" uses (basically, uses that aren't loads or
8251    // stores), because ARM allows folding a scale into many arithmetic
8252    // operations.  This should be made more precise and revisited later.
8253
8254    // Allow r << imm, but the imm has to be a multiple of two.
8255    if (Scale & 1) return false;
8256    return isPowerOf2_32(Scale);
8257  }
8258}
8259
8260/// isLegalAddressingMode - Return true if the addressing mode represented
8261/// by AM is legal for this target, for a load/store of the specified type.
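/// For example (illustrative): for an i32 load in ARM mode, "r1 + (r2 << 2)"
/// is a legal address, but "r1 + (r2 << 2) + 4" is not, since ARM has no
/// reg + scaled-reg + imm addressing mode.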
8262bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
8263                                              Type *Ty) const {
8264  EVT VT = getValueType(Ty, true);
8265  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
8266    return false;
8267
8268  // Can never fold addr of global into load/store.
8269  if (AM.BaseGV)
8270    return false;
8271
8272  switch (AM.Scale) {
8273  case 0:  // no scale reg, must be "r+i" or "r", or "i".
8274    break;
8275  case 1:
8276    if (Subtarget->isThumb1Only())
8277      return false;
8278    // FALL THROUGH.
8279  default:
8280    // ARM doesn't support any R+R*scale+imm addr modes.
8281    if (AM.BaseOffs)
8282      return false;
8283
8284    if (!VT.isSimple())
8285      return false;
8286
8287    if (Subtarget->isThumb2())
8288      return isLegalT2ScaledAddressingMode(AM, VT);
8289
8290    int Scale = AM.Scale;
8291    switch (VT.getSimpleVT().SimpleTy) {
8292    default: return false;
8293    case MVT::i1:
8294    case MVT::i8:
8295    case MVT::i32:
8296      if (Scale < 0) Scale = -Scale;
8297      if (Scale == 1)
8298        return true;
8299      // r + r << imm
8300      return isPowerOf2_32(Scale & ~1);
8301    case MVT::i16:
8302    case MVT::i64:
8303      // r + r
8304      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
8305        return true;
8306      return false;
8307
8308    case MVT::isVoid:
8309      // Note, we allow "void" uses (basically, uses that aren't loads or
8310      // stores), because ARM allows folding a scale into many arithmetic
8311      // operations.  This should be made more precise and revisited later.
8312
8313      // Allow r << imm, but the imm has to be a multiple of two.
8314      if (Scale & 1) return false;
8315      return isPowerOf2_32(Scale);
8316    }
8317    break;
8318  }
8319  return true;
8320}
8321
8322/// isLegalICmpImmediate - Return true if the specified immediate is a legal
8323/// icmp immediate, that is, the target has icmp instructions which can
8324/// compare a register against the immediate without having to materialize
8325/// the immediate into a register.
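/// For example (illustrative): in ARM mode "cmp r0, #0xff00" encodes
/// directly (an 8-bit value rotated by an even amount), while comparing
/// against 0xff1 would first require materializing it into a register.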
8326bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
8327  if (!Subtarget->isThumb())
8328    return ARM_AM::getSOImmVal(Imm) != -1;
8329  if (Subtarget->isThumb2())
8330    return ARM_AM::getT2SOImmVal(Imm) != -1;
8331  return Imm >= 0 && Imm <= 255;
8332}
8333
8334/// isLegalAddImmediate - Return true if the specified immediate is a legal
8335/// add immediate, that is, the target has add instructions which can add
8336/// a register with the immediate without having to materialize the
8337/// immediate into a register.
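/// For example (illustrative): "add r0, r1, #0x3fc" encodes directly
/// (0xff rotated), while an add of 0x101 does not.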
8338bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
8339  return ARM_AM::getSOImmVal(Imm) != -1;
8340}
8341
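/// getARMIndexedAddressParts - Split an ADD/SUB pointer computation into a
/// base and an offset usable by ARM pre/post-indexed addressing: mode 3 for
/// i16 and sign-extending i8/i1 loads, mode 2 for i32/i8/i1.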
8342static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
8343                                      bool isSEXTLoad, SDValue &Base,
8344                                      SDValue &Offset, bool &isInc,
8345                                      SelectionDAG &DAG) {
8346  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
8347    return false;
8348
8349  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
8350    // AddressingMode 3
8351    Base = Ptr->getOperand(0);
8352    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
8353      int RHSC = (int)RHS->getZExtValue();
8354      if (RHSC < 0 && RHSC > -256) {
8355        assert(Ptr->getOpcode() == ISD::ADD);
8356        isInc = false;
8357        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
8358        return true;
8359      }
8360    }
8361    isInc = (Ptr->getOpcode() == ISD::ADD);
8362    Offset = Ptr->getOperand(1);
8363    return true;
8364  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
8365    // AddressingMode 2
8366    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
8367      int RHSC = (int)RHS->getZExtValue();
8368      if (RHSC < 0 && RHSC > -0x1000) {
8369        assert(Ptr->getOpcode() == ISD::ADD);
8370        isInc = false;
8371        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
8372        Base = Ptr->getOperand(0);
8373        return true;
8374      }
8375    }
8376
8377    if (Ptr->getOpcode() == ISD::ADD) {
8378      isInc = true;
8379      ARM_AM::ShiftOpc ShOpcVal =
8380        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
8381      if (ShOpcVal != ARM_AM::no_shift) {
8382        Base = Ptr->getOperand(1);
8383        Offset = Ptr->getOperand(0);
8384      } else {
8385        Base = Ptr->getOperand(0);
8386        Offset = Ptr->getOperand(1);
8387      }
8388      return true;
8389    }
8390
8391    isInc = (Ptr->getOpcode() == ISD::ADD);
8392    Base = Ptr->getOperand(0);
8393    Offset = Ptr->getOperand(1);
8394    return true;
8395  }
8396
8397  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
8398  return false;
8399}
8400
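/// getT2IndexedAddressParts - Same as above for Thumb2, which accepts only
/// an 8-bit immediate offset on indexed loads and stores.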
8401static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
8402                                     bool isSEXTLoad, SDValue &Base,
8403                                     SDValue &Offset, bool &isInc,
8404                                     SelectionDAG &DAG) {
8405  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
8406    return false;
8407
8408  Base = Ptr->getOperand(0);
8409  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
8410    int RHSC = (int)RHS->getZExtValue();
8411    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
8412      assert(Ptr->getOpcode() == ISD::ADD);
8413      isInc = false;
8414      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
8415      return true;
8416    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bits, excluding zero.
8417      isInc = Ptr->getOpcode() == ISD::ADD;
8418      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
8419      return true;
8420    }
8421  }
8422
8423  return false;
8424}
8425
8426/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
8427/// offset, and addressing mode by reference, if the node's address can be
8428/// legally represented as a pre-indexed load / store address.
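/// For example (illustrative), a load from "r1 + 4" can be selected as the
/// pre-indexed "ldr r0, [r1, #4]!", which updates r1 as a side effect.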
8429bool
8430ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
8431                                             SDValue &Offset,
8432                                             ISD::MemIndexedMode &AM,
8433                                             SelectionDAG &DAG) const {
8434  if (Subtarget->isThumb1Only())
8435    return false;
8436
8437  EVT VT;
8438  SDValue Ptr;
8439  bool isSEXTLoad = false;
8440  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8441    Ptr = LD->getBasePtr();
8442    VT  = LD->getMemoryVT();
8443    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
8444  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
8445    Ptr = ST->getBasePtr();
8446    VT  = ST->getMemoryVT();
8447  } else
8448    return false;
8449
8450  bool isInc;
8451  bool isLegal = false;
8452  if (Subtarget->isThumb2())
8453    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
8454                                       Offset, isInc, DAG);
8455  else
8456    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
8457                                        Offset, isInc, DAG);
8458  if (!isLegal)
8459    return false;
8460
8461  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
8462  return true;
8463}
8464
8465/// getPostIndexedAddressParts - Returns true, and sets the base pointer,
8466/// offset, and addressing mode by reference, if this node can be combined
8467/// with a load / store to form a post-indexed load / store.
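/// For example (illustrative), a load from r1 followed by "r1 = r1 + 4" can
/// be selected as the post-indexed "ldr r0, [r1], #4".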
8468bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
8469                                                   SDValue &Base,
8470                                                   SDValue &Offset,
8471                                                   ISD::MemIndexedMode &AM,
8472                                                   SelectionDAG &DAG) const {
8473  if (Subtarget->isThumb1Only())
8474    return false;
8475
8476  EVT VT;
8477  SDValue Ptr;
8478  bool isSEXTLoad = false;
8479  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8480    VT  = LD->getMemoryVT();
8481    Ptr = LD->getBasePtr();
8482    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
8483  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
8484    VT  = ST->getMemoryVT();
8485    Ptr = ST->getBasePtr();
8486  } else
8487    return false;
8488
8489  bool isInc;
8490  bool isLegal = false;
8491  if (Subtarget->isThumb2())
8492    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
8493                                       isInc, DAG);
8494  else
8495    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
8496                                        isInc, DAG);
8497  if (!isLegal)
8498    return false;
8499
8500  if (Ptr != Base) {
8501    // Swap the base ptr and offset to catch more post-indexed load / store
8502    // cases when legal. In Thumb2 mode, the offset must be an immediate.
8503    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
8504        !Subtarget->isThumb2())
8505      std::swap(Base, Offset);
8506
8507    // A post-indexed load / store updates the base pointer.
8508    if (Ptr != Base)
8509      return false;
8510  }
8511
8512  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
8513  return true;
8514}
8515
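/// computeMaskedBitsForTargetNode - Determine which bits of Op are known to
/// be zero or one. Currently only ARMISD::CMOV is handled: a bit is known
/// only if it is known the same way on both operands.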
8516void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
8517                                                       const APInt &Mask,
8518                                                       APInt &KnownZero,
8519                                                       APInt &KnownOne,
8520                                                       const SelectionDAG &DAG,
8521                                                       unsigned Depth) const {
8522  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
8523  switch (Op.getOpcode()) {
8524  default: break;
8525  case ARMISD::CMOV: {
8526    // Bits are known zero/one if known on the LHS and RHS.
8527    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
8528    if (KnownZero == 0 && KnownOne == 0) return;
8529
8530    APInt KnownZeroRHS, KnownOneRHS;
8531    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
8532                          KnownZeroRHS, KnownOneRHS, Depth+1);
8533    KnownZero &= KnownZeroRHS;
8534    KnownOne  &= KnownOneRHS;
8535    return;
8536  }
8537  }
8538}
8539
8540//===----------------------------------------------------------------------===//
8541//                           ARM Inline Assembly Support
8542//===----------------------------------------------------------------------===//
8543
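/// ExpandInlineAsm - Recognize inline assembly that is simply
/// "rev $0, $1" (a 32-bit byte swap) and lower it to the bswap
/// intrinsic instead.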
8544bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
8545  // Looking for "rev" which is V6+.
8546  if (!Subtarget->hasV6Ops())
8547    return false;
8548
8549  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
8550  std::string AsmStr = IA->getAsmString();
8551  SmallVector<StringRef, 4> AsmPieces;
8552  SplitString(AsmStr, AsmPieces, ";\n");
8553
8554  switch (AsmPieces.size()) {
8555  default: return false;
8556  case 1:
8557    AsmStr = AsmPieces[0];
8558    AsmPieces.clear();
8559    SplitString(AsmStr, AsmPieces, " \t,");
8560
8561    // rev $0, $1
8562    if (AsmPieces.size() == 3 &&
8563        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
8564        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
8565      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
8566      if (Ty && Ty->getBitWidth() == 32)
8567        return IntrinsicLowering::LowerToByteSwap(CI);
8568    }
8569    break;
8570  }
8571
8572  return false;
8573}
8574
8575/// getConstraintType - Given a constraint letter, return the type of
8576/// constraint it is for this target.
8577ARMTargetLowering::ConstraintType
8578ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
8579  if (Constraint.size() == 1) {
8580    switch (Constraint[0]) {
8581    default:  break;
8582    case 'l': return C_RegisterClass;
8583    case 'w': return C_RegisterClass;
8584    case 'h': return C_RegisterClass;
8585    case 'x': return C_RegisterClass;
8586    case 't': return C_RegisterClass;
8587    case 'j': return C_Other; // Constant suitable for movw.
8588    // An address with a single base register. Due to the way we currently
8589    // handle addresses, it is the same as an 'r' memory constraint.
8590    case 'Q': return C_Memory;
8591    }
8592  } else if (Constraint.size() == 2) {
8593    switch (Constraint[0]) {
8594    default: break;
8595    // All 'U'-prefixed constraints are addresses.
8596    case 'U': return C_Memory;
8597    }
8598  }
8599  return TargetLowering::getConstraintType(Constraint);
8600}
8601
8602/// Examine constraint type and operand type and determine a weight value.
8603/// This object must already have been set up with the operand type
8604/// and the current alternative constraint selected.
8605TargetLowering::ConstraintWeight
8606ARMTargetLowering::getSingleConstraintMatchWeight(
8607    AsmOperandInfo &info, const char *constraint) const {
8608  ConstraintWeight weight = CW_Invalid;
8609  Value *CallOperandVal = info.CallOperandVal;
8610  // If we don't have a value, we can't do a match,
8611  // but allow it at the lowest weight.
8612  if (CallOperandVal == NULL)
8613    return CW_Default;
8614  Type *type = CallOperandVal->getType();
8615  // Look at the constraint type.
8616  switch (*constraint) {
8617  default:
8618    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
8619    break;
8620  case 'l':
8621    if (type->isIntegerTy()) {
8622      if (Subtarget->isThumb())
8623        weight = CW_SpecificReg;
8624      else
8625        weight = CW_Register;
8626    }
8627    break;
8628  case 'w':
8629    if (type->isFloatingPointTy())
8630      weight = CW_Register;
8631    break;
8632  }
8633  return weight;
8634}
8635
8636typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
8637RCPair
8638ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
8639                                                EVT VT) const {
8640  if (Constraint.size() == 1) {
8641    // GCC ARM Constraint Letters
8642    switch (Constraint[0]) {
8643    case 'l': // Low regs or general regs.
8644      if (Subtarget->isThumb())
8645        return RCPair(0U, ARM::tGPRRegisterClass);
8646      else
8647        return RCPair(0U, ARM::GPRRegisterClass);
8648    case 'h': // High regs or no regs.
8649      if (Subtarget->isThumb())
8650        return RCPair(0U, ARM::hGPRRegisterClass);
8651      break;
8652    case 'r':
8653      return RCPair(0U, ARM::GPRRegisterClass);
8654    case 'w':
8655      if (VT == MVT::f32)
8656        return RCPair(0U, ARM::SPRRegisterClass);
8657      if (VT.getSizeInBits() == 64)
8658        return RCPair(0U, ARM::DPRRegisterClass);
8659      if (VT.getSizeInBits() == 128)
8660        return RCPair(0U, ARM::QPRRegisterClass);
8661      break;
8662    case 'x':
8663      if (VT == MVT::f32)
8664        return RCPair(0U, ARM::SPR_8RegisterClass);
8665      if (VT.getSizeInBits() == 64)
8666        return RCPair(0U, ARM::DPR_8RegisterClass);
8667      if (VT.getSizeInBits() == 128)
8668        return RCPair(0U, ARM::QPR_8RegisterClass);
8669      break;
8670    case 't':
8671      if (VT == MVT::f32)
8672        return RCPair(0U, ARM::SPRRegisterClass);
8673      break;
8674    }
8675  }
8676  if (StringRef("{cc}").equals_lower(Constraint))
8677    return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);
8678
8679  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
8680}
8681
8682/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
8683/// vector.  If it is invalid, don't add anything to Ops.
8684void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
8685                                                     std::string &Constraint,
8686                                                     std::vector<SDValue>&Ops,
8687                                                     SelectionDAG &DAG) const {
8688  SDValue Result(0, 0);
8689
8690  // Currently only support length 1 constraints.
8691  if (Constraint.length() != 1) return;
8692
8693  char ConstraintLetter = Constraint[0];
8694  switch (ConstraintLetter) {
8695  default: break;
8696  case 'j':
8697  case 'I': case 'J': case 'K': case 'L':
8698  case 'M': case 'N': case 'O':
8699    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
8700    if (!C)
8701      return;
8702
8703    int64_t CVal64 = C->getSExtValue();
8704    int CVal = (int) CVal64;
8705    // None of these constraints allow values larger than 32 bits.  Check
8706    // that the value fits in an int.
8707    if (CVal != CVal64)
8708      return;
8709
8710    switch (ConstraintLetter) {
8711      case 'j':
8712        // Constant suitable for movw, must be between 0 and
8713        // 65535.
8714        if (Subtarget->hasV6T2Ops())
8715          if (CVal >= 0 && CVal <= 65535)
8716            break;
8717        return;
8718      case 'I':
8719        if (Subtarget->isThumb1Only()) {
8720          // This must be a constant between 0 and 255, for ADD
8721          // immediates.
8722          if (CVal >= 0 && CVal <= 255)
8723            break;
8724        } else if (Subtarget->isThumb2()) {
8725          // A constant that can be used as an immediate value in a
8726          // data-processing instruction.
8727          if (ARM_AM::getT2SOImmVal(CVal) != -1)
8728            break;
8729        } else {
8730          // A constant that can be used as an immediate value in a
8731          // data-processing instruction.
8732          if (ARM_AM::getSOImmVal(CVal) != -1)
8733            break;
8734        }
8735        return;
8736
8737      case 'J':
8738        if (Subtarget->isThumb()) {  // FIXME thumb2
8739          // This must be a constant between -255 and -1, for negated ADD
8740          // immediates. This can be used in GCC with an "n" modifier that
8741          // prints the negated value, for use with SUB instructions. It is
8742          // not useful otherwise but is implemented for compatibility.
8743          if (CVal >= -255 && CVal <= -1)
8744            break;
8745        } else {
8746          // This must be a constant between -4095 and 4095. It is not clear
8747          // what this constraint is intended for. Implemented for
8748          // compatibility with GCC.
8749          if (CVal >= -4095 && CVal <= 4095)
8750            break;
8751        }
8752        return;
8753
8754      case 'K':
8755        if (Subtarget->isThumb1Only()) {
8756          // A 32-bit value where only one byte has a nonzero value. Exclude
8757          // zero to match GCC. This constraint is used by GCC internally for
8758          // constants that can be loaded with a move/shift combination.
8759          // It is not useful otherwise but is implemented for compatibility.
8760          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
8761            break;
8762        } else if (Subtarget->isThumb2()) {
8763          // A constant whose bitwise inverse can be used as an immediate
8764          // value in a data-processing instruction. This can be used in GCC
8765          // with a "B" modifier that prints the inverted value, for use with
8766          // BIC and MVN instructions. It is not useful otherwise but is
8767          // implemented for compatibility.
8768          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
8769            break;
8770        } else {
8771          // A constant whose bitwise inverse can be used as an immediate
8772          // value in a data-processing instruction. This can be used in GCC
8773          // with a "B" modifier that prints the inverted value, for use with
8774          // BIC and MVN instructions. It is not useful otherwise but is
8775          // implemented for compatibility.
8776          if (ARM_AM::getSOImmVal(~CVal) != -1)
8777            break;
8778        }
8779        return;
8780
8781      case 'L':
8782        if (Subtarget->isThumb1Only()) {
8783          // This must be a constant between -7 and 7,
8784          // for 3-operand ADD/SUB immediate instructions.
8785          if (CVal >= -7 && CVal <= 7)
8786            break;
8787        } else if (Subtarget->isThumb2()) {
8788          // A constant whose negation can be used as an immediate value in a
8789          // data-processing instruction. This can be used in GCC with an "n"
8790          // modifier that prints the negated value, for use with SUB
8791          // instructions. It is not useful otherwise but is implemented for
8792          // compatibility.
8793          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
8794            break;
8795        } else {
8796          // A constant whose negation can be used as an immediate value in a
8797          // data-processing instruction. This can be used in GCC with an "n"
8798          // modifier that prints the negated value, for use with SUB
8799          // instructions. It is not useful otherwise but is implemented for
8800          // compatibility.
8801          if (ARM_AM::getSOImmVal(-CVal) != -1)
8802            break;
8803        }
8804        return;
8805
8806      case 'M':
8807        if (Subtarget->isThumb()) { // FIXME thumb2
8808          // This must be a multiple of 4 between 0 and 1020, for
8809          // ADD sp + immediate.
8810          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
8811            break;
8812        } else {
8813          // A power of two or a constant between 0 and 32.  This is used in
8814          // GCC for the shift amount on shifted register operands, but it is
8815          // useful in general for any shift amounts.
8816          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
8817            break;
8818        }
8819        return;
8820
8821      case 'N':
8822        if (Subtarget->isThumb()) {  // FIXME thumb2
8823          // This must be a constant between 0 and 31, for shift amounts.
8824          if (CVal >= 0 && CVal <= 31)
8825            break;
8826        }
8827        return;
8828
8829      case 'O':
8830        if (Subtarget->isThumb()) {  // FIXME thumb2
8831          // This must be a multiple of 4 between -508 and 508, for
8832          // ADD/SUB sp = sp + immediate.
8833          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
8834            break;
8835        }
8836        return;
8837    }
8838    Result = DAG.getTargetConstant(CVal, Op.getValueType());
8839    break;
8840  }
8841
8842  if (Result.getNode()) {
8843    Ops.push_back(Result);
8844    return;
8845  }
8846  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8847}
8848
8849bool
8850ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
8851  // The ARM target isn't yet aware of offsets.
8852  return false;
8853}
8854
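/// isBitFieldInvertedMask - Return true if the mask consists of a single
/// contiguous run of 0 bits with (possibly empty) runs of 1 bits on both
/// ends, e.g. 0xff0000ff; these are the inverted masks matched by BFC/BFI.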
8855bool ARM::isBitFieldInvertedMask(unsigned v) {
8856  if (v == 0xffffffff)
8857    return false;
8858  // There can be 1's on either or both "outsides"; all the "inside"
8859  // bits must be 0's.
8860  unsigned int lsb = 0, msb = 31;
8861  while (v & (1 << msb)) --msb;
8862  while (v & (1 << lsb)) ++lsb;
8863  for (unsigned int i = lsb; i <= msb; ++i) {
8864    if (v & (1 << i))
8865      return false;
8866  }
8867  return true;
8868}
8869
8870/// isFPImmLegal - Returns true if the target can instruction select the
8871/// specified FP immediate natively. If false, the legalizer will
8872/// materialize the FP immediate as a load from a constant pool.
8873bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
8874  if (!Subtarget->hasVFP3())
8875    return false;
8876  if (VT == MVT::f32)
8877    return ARM_AM::getFP32Imm(Imm) != -1;
8878  if (VT == MVT::f64)
8879    return ARM_AM::getFP64Imm(Imm) != -1;
8880  return false;
8881}
8882
8883/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
8884/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
8885/// specified in the intrinsic calls.
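/// For example (illustrative), a vld2 returning two <4 x i16> vectors gets a
/// conservative memVT of v2i64, covering all 16 bytes the instruction loads.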
8886bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
8887                                           const CallInst &I,
8888                                           unsigned Intrinsic) const {
8889  switch (Intrinsic) {
8890  case Intrinsic::arm_neon_vld1:
8891  case Intrinsic::arm_neon_vld2:
8892  case Intrinsic::arm_neon_vld3:
8893  case Intrinsic::arm_neon_vld4:
8894  case Intrinsic::arm_neon_vld2lane:
8895  case Intrinsic::arm_neon_vld3lane:
8896  case Intrinsic::arm_neon_vld4lane: {
8897    Info.opc = ISD::INTRINSIC_W_CHAIN;
8898    // Conservatively set memVT to the entire set of vectors loaded.
8899    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
8900    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
8901    Info.ptrVal = I.getArgOperand(0);
8902    Info.offset = 0;
8903    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
8904    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
8905    Info.vol = false; // volatile loads with NEON intrinsics not supported
8906    Info.readMem = true;
8907    Info.writeMem = false;
8908    return true;
8909  }
8910  case Intrinsic::arm_neon_vst1:
8911  case Intrinsic::arm_neon_vst2:
8912  case Intrinsic::arm_neon_vst3:
8913  case Intrinsic::arm_neon_vst4:
8914  case Intrinsic::arm_neon_vst2lane:
8915  case Intrinsic::arm_neon_vst3lane:
8916  case Intrinsic::arm_neon_vst4lane: {
8917    Info.opc = ISD::INTRINSIC_VOID;
8918    // Conservatively set memVT to the entire set of vectors stored.
8919    unsigned NumElts = 0;
8920    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
8921      Type *ArgTy = I.getArgOperand(ArgI)->getType();
8922      if (!ArgTy->isVectorTy())
8923        break;
8924      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
8925    }
8926    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
8927    Info.ptrVal = I.getArgOperand(0);
8928    Info.offset = 0;
8929    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
8930    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
8931    Info.vol = false; // volatile stores with NEON intrinsics not supported
8932    Info.readMem = false;
8933    Info.writeMem = true;
8934    return true;
8935  }
8936  case Intrinsic::arm_strexd: {
8937    Info.opc = ISD::INTRINSIC_W_CHAIN;
8938    Info.memVT = MVT::i64;
8939    Info.ptrVal = I.getArgOperand(2);
8940    Info.offset = 0;
8941    Info.align = 8;
8942    Info.vol = true;
8943    Info.readMem = false;
8944    Info.writeMem = true;
8945    return true;
8946  }
8947  case Intrinsic::arm_ldrexd: {
8948    Info.opc = ISD::INTRINSIC_W_CHAIN;
8949    Info.memVT = MVT::i64;
8950    Info.ptrVal = I.getArgOperand(0);
8951    Info.offset = 0;
8952    Info.align = 8;
8953    Info.vol = true;
8954    Info.readMem = true;
8955    Info.writeMem = false;
8956    return true;
8957  }
8958  default:
8959    break;
8960  }
8961
8962  return false;
8963}
8964