ARMISelLowering.h revision 088880cb192fb6dd5b1bf85af62023c5ca3da38f
//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable.

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with a branch instead of branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2-level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      FMRRD,        // Double to two gprs.
      FMDRR,        // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.
      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector load/store with (de)interleaving
      VLD2D,
      VLD3D,
      VLD4D,
      VST2D,
      VST3D,
      VST4D,

      // Vector shuffles:
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16        // reverse elements within 16-bit halfwords
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVMOVImm - If this is a build_vector of constants which can be
    /// formed by using a VMOV instruction of the specified element size,
    /// return the constant being splatted.  The ByteSize field indicates the
    /// number of bytes of each element (1, 2, 4, or 8).
    SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG);

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops.  If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    virtual const ARMSubtarget* getSubtarget() {
      return Subtarget;
    }

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags);
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG, DebugLoc dl);

    CCAssignFn *CCAssignFnForNode(unsigned CC, bool Return, bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags);
    SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG);
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG);
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);

    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    bool AlwaysInline,
                                    const Value *DstSV, uint64_t DstSVOff,
                                    const Value *SrcSV, uint64_t SrcSVOff);
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            unsigned CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           unsigned CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                unsigned CallConv, bool isVarArg,
                bool isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals);

    virtual SDValue
      LowerReturn(SDValue Chain,
                  unsigned CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  DebugLoc dl, SelectionDAG &DAG);
  };
}

#endif // ARMISELLOWERING_H
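
The header above only declares the interface. As a rough illustration of how one of the private lowering hooks produces an ARMISD node, the sketch below shows the general shape that LowerGlobalAddressELF can take: build a target-specific address node and wrap it in ARMISD::Wrapper so instruction selection can match it. This is a simplified assumption for exposition, not the body from ARMISelLowering.cpp; the real routine also handles PIC code and constant-pool entries.

// Illustrative sketch only -- not the actual implementation in
// ARMISelLowering.cpp.  Simplified non-PIC case.
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = getPointerTy();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  // Build a TargetGlobalAddress for the symbol and wrap it in an
  // ARMISD::Wrapper node; the instruction selector matches the wrapper
  // when it materializes the address.
  SDValue Addr = DAG.getTargetGlobalAddress(GV, PtrVT);
  return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, Addr);
}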