ARMISelLowering.h revision d966817f3cb87897cbec29c967b974924fe939ba
//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,        // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,       // SjLj exception handling longjmp.
      EH_SJLJ_DISPATCHSETUP, // SjLj exception handling dispatch setup.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,     // Memory barrier (DMB)
      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized.  Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM
    };
  }

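  // For illustration only (a sketch, not part of any specific lowering
  // routine): the target-specific opcodes above are used by custom lowering
  // code to build SelectionDAG nodes, e.g. with assumed locals DAG, dl, VT
  // and Elt in scope:
  //   SDValue Dup = DAG.getNode(ARMISD::VDUP, dl, VT, Elt);
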
  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
    bool isBitFieldInvertedMask(unsigned v);
  }

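  // For illustration (hypothetical caller, based on the contract above): a
  // check for whether an f32 constant fits the VMOV.f32 immediate encoding
  // might look like
  //   if (ARM::getVFPf32Imm(Imm) != -1)
  //     ... // selectable as a single fconsts
  // where Imm is an APFloat holding the constant.
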
  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding(void) const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;


    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops.  If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

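    // Illustrative note for the isLegalICmpImmediate hook above (hypothetical
    // values, sketch only): an ARM compare immediate is roughly an 8-bit value
    // rotated by an even amount, so one would expect, e.g.,
    //   isLegalICmpImmediate(255)        -> true  (fits a CMP/CMN immediate)
    //   isLegalICmpImmediate(0x12345678) -> false (must be materialized first)
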
    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;

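    // For illustration: on ARM one would expect isFPImmLegal to be answered in
    // terms of the ARM::getVFPf32Imm / ARM::getVFPf64Imm predicates declared
    // earlier, i.e. conceptually (sketch only, subtarget checks omitted):
    //   return VT == MVT::f32 ? ARM::getVFPf32Imm(Imm) != -1
    //                         : ARM::getVFPf64Imm(Imm) != -1;
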
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

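    // For illustration: these two helpers cover f64 values that the calling
    // convention splits across a pair of core registers; conceptually the
    // outgoing half corresponds to the ARMISD::VMOVRRD node declared above,
    // e.g. (sketch, with assumed locals DAG, dl and Arg):
    //   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
    //                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
    // after which fmrrd.getValue(0) and fmrrd.getValue(1) go into the two
    // registers assigned by the calling convention.
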
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;

  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H