ARMISelLowering.h revision b36ec86c01e3c3238dca621648f017aef96dda60
//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      FMRRD,        // double to two gprs.
      FMDRR,        // Two gprs to double.

      EH_SJLJ_SETJMP,    // SjLj exception handling setjmp
      EH_SJLJ_LONGJMP,   // SjLj exception handling longjmp

      THREAD_POINTER,

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector duplicate lane (128-bit result only; 64-bit is a shuffle)
      VDUPLANEQ,    // splat a lane from a 64-bit vector to a 128-bit vector

      // Vector load/store with (de)interleaving
      VLD2D,
      VLD3D,
      VLD4D,
      VST2D,
      VST3D,
      VST4D
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVMOVImm - If this is a build_vector of constants which can be
    /// formed by using a VMOV instruction of the specified element size,
    /// return the constant being splatted.  The ByteSize field indicates the
    /// number of bytes of each element [1248].
    SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);

    /// isVREVMask - Check if a vector shuffle corresponds to a VREV
    /// instruction with the specified blocksize.  (The order of the elements
    /// within each block of the vector is reversed.)
    bool isVREVMask(ShuffleVectorSDNode *N, unsigned blocksize);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG);

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                  MachineBasicBlock *MBB) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;
    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;
    std::vector<unsigned>
    getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                      MVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraint of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    virtual const ARMSubtarget* getSubtarget() {
      return Subtarget;
    }

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags);
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG, DebugLoc dl);

    CCAssignFn *CCAssignFnForNode(unsigned CC, bool Return, bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags);
    SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG);
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG);
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);

    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    bool AlwaysInline,
                                    const Value *DstSV, uint64_t DstSVOff,
                                    const Value *SrcSV, uint64_t SrcSVOff);
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            unsigned CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           unsigned CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                unsigned CallConv, bool isVarArg,
                bool isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerReturn(SDValue Chain,
                  unsigned CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  DebugLoc dl, SelectionDAG &DAG);
  };
}

#endif  // ARMISELLOWERING_H
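The ARM::isVREVMask predicate declared in this header reports whether a vector shuffle reverses the element order within each block of the given block size, which is the pattern the NEON VREV16/VREV32/VREV64 instructions implement. The following standalone sketch is not part of this header and is not LLVM's actual implementation; it only spells out the index arithmetic that the comment describes, with the mask encoding (a flat vector of source indices) and the element widths chosen here as illustrative assumptions.

// Illustrative sketch only -- hypothetical helper, not from ARMISelLowering.
#include <cstdio>
#include <vector>

// Returns true if Mask reverses elements within each block of BlockSize
// bits, assuming each element is EltBits wide (the VREV shuffle pattern).
static bool looksLikeVREVMask(const std::vector<int> &Mask,
                              unsigned EltBits, unsigned BlockSize) {
  if (BlockSize != 16 && BlockSize != 32 && BlockSize != 64)
    return false;                                  // VREV16 / VREV32 / VREV64
  unsigned EltsPerBlock = BlockSize / EltBits;
  if (EltsPerBlock < 2 || Mask.size() % EltsPerBlock != 0)
    return false;
  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    unsigned Block = i / EltsPerBlock;
    unsigned Expected =
        Block * EltsPerBlock + (EltsPerBlock - 1 - i % EltsPerBlock);
    if (Mask[i] != (int)Expected)                  // element not reversed
      return false;
  }
  return true;
}

int main() {
  // Swapping each adjacent pair of i8 elements matches VREV16.8:
  std::vector<int> Rev16 = {1, 0, 3, 2, 5, 4, 7, 6};
  // Reversing each group of four i8 elements matches VREV32.8:
  std::vector<int> Rev32 = {3, 2, 1, 0, 7, 6, 5, 4};
  std::printf("VREV16 candidate: %d\n", looksLikeVREVMask(Rev16, 8, 16)); // 1
  std::printf("VREV32 candidate: %d\n", looksLikeVREVMask(Rev32, 8, 32)); // 1
  std::printf("VREV64 candidate: %d\n", looksLikeVREVMask(Rev16, 8, 64)); // 0
  return 0;
}

The real predicate operates on a ShuffleVectorSDNode and the DAG's value types rather than raw integer vectors, but the per-block reversed-index check it performs follows the same shape as the loop above.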