ARMISelLowering.h revision 5de5d4b6d0eb3fd379fa571d82f6fa764460b3b8
//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM-specific DAG nodes.
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch (not branch-and-link).
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      BCC_i64,      // Conditional branch on an i64 comparison.

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,        // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,       // SjLj exception handling longjmp.
      EH_SJLJ_DISPATCHSETUP, // SjLj exception handling dispatch setup.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER, // TLS thread pointer.

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier (DMB)
      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP
    };
  }
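
  // Illustrative sketch (not part of this interface): the custom lowering
  // code in ARMISelLowering.cpp builds these target nodes with SelectionDAG.
  // For instance, a vector "setcc a > b" is lowered to ARMISD::VCGT roughly
  // along the lines of:
  //
  //   SDValue Cmp = DAG.getNode(ARMISD::VCGT, dl, VT, Op0, Op1);
  //
  // The exact call sites and operand order here are assumptions for
  // exposition; see the LowerVSETCC implementation for the authoritative
  // version.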

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
    bool isBitFieldInvertedMask(unsigned v);
  }
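
  // For example (assumptions based on the VFP3 immediate format, not a
  // contract stated by this header): 1.0f is encodable, so getVFPf32Imm
  // would return its 8-bit encoding 0x70, while a value such as 0.1f is not
  // encodable and would yield -1. Similarly, isBitFieldInvertedMask would
  // accept 0xffff00ff, whose complement is a single contiguous run of ones,
  // the shape the BFC/BFI instructions can handle.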

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding(void) const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - Returns true by value, and sets the base
    /// pointer, offset pointer, and addressing mode by reference, if the
    /// node's address can be legally represented as a pre-indexed load /
    /// store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true by value, and sets the base
    /// pointer, offset pointer, and addressing mode by reference, if this
    /// node can be combined with a load / store to form a post-indexed load /
    /// store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
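
    // Illustrative example (the assembly below is an assumption for
    // exposition, not emitted by this header): a pre-indexed access updates
    // the base register before the access, e.g. "ldr r0, [r1, #4]!", while a
    // post-indexed access uses the old base and then updates it, e.g.
    // "ldr r0, [r1], #4". The two hooks above let the DAG combiner recognize
    // when an address computation can be folded into such a form.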

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true, it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                 MachineFunction &MF) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);
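
    // As a point of reference (assumed from the NEON register file layout,
    // not stated elsewhere in this header): addDRTypeForNEON registers the
    // 64-bit vector types that live in D registers (e.g. v8i8, v4i16, v2i32,
    // v2f32), while addQRTypeForNEON registers the 128-bit types that live in
    // Q registers, each of which aliases a pair of D registers (e.g. v16i8,
    // v8i16, v4i32, v4f32).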

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H
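
  // Sketch of what the two atomic emitters above produce (an assumption based
  // on the usual ARM lowering of atomics, offered for orientation only): each
  // atomic pseudo-instruction expands to an exclusive load/store retry loop,
  // e.g. for a 32-bit atomic add:
  //
  //   loop:
  //     ldrex   r0, [r1]        ; load-exclusive the current value
  //     add     r0, r0, r2      ; apply the binary operation
  //     strex   r3, r0, [r1]    ; try to store-exclusive the result
  //     cmp     r3, #0          ; r3 == 0 means the store succeeded
  //     bne     loop            ; retry if another observer intervened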