ARMISelLowering.h revision c73158730d43e7c8bdef32b2107566a6e78a8538
1//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that ARM uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#ifndef ARMISELLOWERING_H 16#define ARMISELLOWERING_H 17 18#include "ARMSubtarget.h" 19#include "llvm/Target/TargetLowering.h" 20#include "llvm/Target/TargetRegisterInfo.h" 21#include "llvm/CodeGen/FastISel.h" 22#include "llvm/CodeGen/SelectionDAG.h" 23#include "llvm/CodeGen/CallingConvLower.h" 24#include <vector> 25 26namespace llvm { 27 class ARMConstantPoolValue; 28 29 namespace ARMISD { 30 // ARM Specific DAG Nodes 31 enum NodeType { 32 // Start the numbering where the builtin ops and target ops leave off. 33 FIRST_NUMBER = ISD::BUILTIN_OP_END, 34 35 Wrapper, // Wrapper - A wrapper node for TargetConstantPool, 36 // TargetExternalSymbol, and TargetGlobalAddress. 37 WrapperDYN, // WrapperDYN - A wrapper node for TargetGlobalAddress in 38 // DYN mode. 39 WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in 40 // PIC mode. 41 WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable 42 43 CALL, // Function call. 44 CALL_PRED, // Function call that's predicable. 45 CALL_NOLINK, // Function call with branch not branch-and-link. 46 tCALL, // Thumb function call. 47 BRCOND, // Conditional branch. 48 BR_JT, // Jumptable branch. 49 BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump). 50 RET_FLAG, // Return with a flag operand. 51 52 PIC_ADD, // Add with a PC operand and a PIC label. 53 54 CMP, // ARM compare instructions. 55 CMPZ, // ARM compare that sets only Z flag. 56 CMPFP, // ARM VFP compare instruction, sets FPSCR. 57 CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR. 58 FMSTAT, // ARM fmstat instruction. 59 CMOV, // ARM conditional move instructions. 60 61 BCC_i64, 62 63 RBIT, // ARM bitreverse instruction 64 65 FTOSI, // FP to sint within a FP register. 66 FTOUI, // FP to uint within a FP register. 67 SITOF, // sint to FP within a FP register. 68 UITOF, // uint to FP within a FP register. 69 70 SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out. 71 SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out. 72 RRX, // V = RRX X, Flag -> srl X, 1 + shift in carry flag. 73 74 VMOVRRD, // double to two gprs. 75 VMOVDRR, // Two gprs to double. 76 77 EH_SJLJ_SETJMP, // SjLj exception handling setjmp. 78 EH_SJLJ_LONGJMP, // SjLj exception handling longjmp. 79 EH_SJLJ_DISPATCHSETUP, // SjLj exception handling dispatch setup. 80 81 TC_RETURN, // Tail call return pseudo. 82 83 THREAD_POINTER, 84 85 DYN_ALLOC, // Dynamic allocation on the stack. 86 87 MEMBARRIER, // Memory barrier (DMB) 88 MEMBARRIER_MCR, // Memory barrier (MCR) 89 90 PRELOAD, // Preload 91 92 VCEQ, // Vector compare equal. 93 VCEQZ, // Vector compare equal to zero. 94 VCGE, // Vector compare greater than or equal. 95 VCGEZ, // Vector compare greater than or equal to zero. 96 VCLEZ, // Vector compare less than or equal to zero. 97 VCGEU, // Vector compare unsigned greater than or equal. 98 VCGT, // Vector compare greater than. 99 VCGTZ, // Vector compare greater than zero. 100 VCLTZ, // Vector compare less than zero. 101 VCGTU, // Vector compare unsigned greater than. 102 VTST, // Vector test bits. 103 104 // Vector shift by immediate: 105 VSHL, // ...left 106 VSHRs, // ...right (signed) 107 VSHRu, // ...right (unsigned) 108 VSHLLs, // ...left long (signed) 109 VSHLLu, // ...left long (unsigned) 110 VSHLLi, // ...left long (with maximum shift count) 111 VSHRN, // ...right narrow 112 113 // Vector rounding shift by immediate: 114 VRSHRs, // ...right (signed) 115 VRSHRu, // ...right (unsigned) 116 VRSHRN, // ...right narrow 117 118 // Vector saturating shift by immediate: 119 VQSHLs, // ...left (signed) 120 VQSHLu, // ...left (unsigned) 121 VQSHLsu, // ...left (signed to unsigned) 122 VQSHRNs, // ...right narrow (signed) 123 VQSHRNu, // ...right narrow (unsigned) 124 VQSHRNsu, // ...right narrow (signed to unsigned) 125 126 // Vector saturating rounding shift by immediate: 127 VQRSHRNs, // ...right narrow (signed) 128 VQRSHRNu, // ...right narrow (unsigned) 129 VQRSHRNsu, // ...right narrow (signed to unsigned) 130 131 // Vector shift and insert: 132 VSLI, // ...left 133 VSRI, // ...right 134 135 // Vector get lane (VMOV scalar to ARM core register) 136 // (These are used for 8- and 16-bit element types only.) 137 VGETLANEu, // zero-extend vector extract element 138 VGETLANEs, // sign-extend vector extract element 139 140 // Vector move immediate and move negated immediate: 141 VMOVIMM, 142 VMVNIMM, 143 144 // Vector duplicate: 145 VDUP, 146 VDUPLANE, 147 148 // Vector shuffles: 149 VEXT, // extract 150 VREV64, // reverse elements within 64-bit doublewords 151 VREV32, // reverse elements within 32-bit words 152 VREV16, // reverse elements within 16-bit halfwords 153 VZIP, // zip (interleave) 154 VUZP, // unzip (deinterleave) 155 VTRN, // transpose 156 VTBL1, // 1-register shuffle with mask 157 VTBL2, // 2-register shuffle with mask 158 159 // Vector multiply long: 160 VMULLs, // ...signed 161 VMULLu, // ...unsigned 162 163 // Operands of the standard BUILD_VECTOR node are not legalized, which 164 // is fine if BUILD_VECTORs are always lowered to shuffles or other 165 // operations, but for ARM some BUILD_VECTORs are legal as-is and their 166 // operands need to be legalized. Define an ARM-specific version of 167 // BUILD_VECTOR for this purpose. 168 BUILD_VECTOR, 169 170 // Floating-point max and min: 171 FMAX, 172 FMIN, 173 174 // Bit-field insert 175 BFI, 176 177 // Vector OR with immediate 178 VORRIMM, 179 // Vector AND with NOT of immediate 180 VBICIMM, 181 182 // Vector bitwise select 183 VBSL, 184 185 // Vector load N-element structure to all lanes: 186 VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE, 187 VLD3DUP, 188 VLD4DUP, 189 190 // NEON loads with post-increment base updates: 191 VLD1_UPD, 192 VLD2_UPD, 193 VLD3_UPD, 194 VLD4_UPD, 195 VLD2LN_UPD, 196 VLD3LN_UPD, 197 VLD4LN_UPD, 198 VLD2DUP_UPD, 199 VLD3DUP_UPD, 200 VLD4DUP_UPD, 201 202 // NEON stores with post-increment base updates: 203 VST1_UPD, 204 VST2_UPD, 205 VST3_UPD, 206 VST4_UPD, 207 VST2LN_UPD, 208 VST3LN_UPD, 209 VST4LN_UPD 210 }; 211 } 212 213 /// Define some predicates that are used for node matching. 214 namespace ARM { 215 /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be 216 /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd) 217 /// instruction, returns its 8-bit integer representation. Otherwise, 218 /// returns -1. 219 int getVFPf32Imm(const APFloat &FPImm); 220 int getVFPf64Imm(const APFloat &FPImm); 221 bool isBitFieldInvertedMask(unsigned v); 222 } 223 224 //===--------------------------------------------------------------------===// 225 // ARMTargetLowering - ARM Implementation of the TargetLowering interface 226 227 class ARMTargetLowering : public TargetLowering { 228 public: 229 explicit ARMTargetLowering(TargetMachine &TM); 230 231 virtual unsigned getJumpTableEncoding(void) const; 232 233 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 234 235 /// ReplaceNodeResults - Replace the results of node with an illegal result 236 /// type with new values built out of custom code. 237 /// 238 virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, 239 SelectionDAG &DAG) const; 240 241 virtual const char *getTargetNodeName(unsigned Opcode) const; 242 243 virtual MachineBasicBlock * 244 EmitInstrWithCustomInserter(MachineInstr *MI, 245 MachineBasicBlock *MBB) const; 246 247 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; 248 249 bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const; 250 251 /// allowsUnalignedMemoryAccesses - Returns true if the target allows 252 /// unaligned memory accesses. of the specified type. 253 /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON? 254 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const; 255 256 /// isLegalAddressingMode - Return true if the addressing mode represented 257 /// by AM is legal for this target, for a load/store of the specified type. 258 virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const; 259 bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const; 260 261 /// isLegalICmpImmediate - Return true if the specified immediate is legal 262 /// icmp immediate, that is the target has icmp instructions which can 263 /// compare a register against the immediate without having to materialize 264 /// the immediate into a register. 265 virtual bool isLegalICmpImmediate(int64_t Imm) const; 266 267 /// getPreIndexedAddressParts - returns true by value, base pointer and 268 /// offset pointer and addressing mode by reference if the node's address 269 /// can be legally represented as pre-indexed load / store address. 270 virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, 271 SDValue &Offset, 272 ISD::MemIndexedMode &AM, 273 SelectionDAG &DAG) const; 274 275 /// getPostIndexedAddressParts - returns true by value, base pointer and 276 /// offset pointer and addressing mode by reference if this node can be 277 /// combined with a load / store to form a post-indexed load / store. 278 virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, 279 SDValue &Base, SDValue &Offset, 280 ISD::MemIndexedMode &AM, 281 SelectionDAG &DAG) const; 282 283 virtual void computeMaskedBitsForTargetNode(const SDValue Op, 284 const APInt &Mask, 285 APInt &KnownZero, 286 APInt &KnownOne, 287 const SelectionDAG &DAG, 288 unsigned Depth) const; 289 290 291 virtual bool ExpandInlineAsm(CallInst *CI) const; 292 293 ConstraintType getConstraintType(const std::string &Constraint) const; 294 295 /// Examine constraint string and operand type and determine a weight value. 296 /// The operand object must already have been set up with the operand type. 297 ConstraintWeight getSingleConstraintMatchWeight( 298 AsmOperandInfo &info, const char *constraint) const; 299 300 std::pair<unsigned, const TargetRegisterClass*> 301 getRegForInlineAsmConstraint(const std::string &Constraint, 302 EVT VT) const; 303 std::vector<unsigned> 304 getRegClassForInlineAsmConstraint(const std::string &Constraint, 305 EVT VT) const; 306 307 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 308 /// vector. If it is invalid, don't add anything to Ops. If hasMemory is 309 /// true it means one of the asm constraint of the inline asm instruction 310 /// being processed is 'm'. 311 virtual void LowerAsmOperandForConstraint(SDValue Op, 312 char ConstraintLetter, 313 std::vector<SDValue> &Ops, 314 SelectionDAG &DAG) const; 315 316 const ARMSubtarget* getSubtarget() const { 317 return Subtarget; 318 } 319 320 /// getRegClassFor - Return the register class that should be used for the 321 /// specified value type. 322 virtual TargetRegisterClass *getRegClassFor(EVT VT) const; 323 324 /// getFunctionAlignment - Return the Log2 alignment of this function. 325 virtual unsigned getFunctionAlignment(const Function *F) const; 326 327 /// getMaximalGlobalOffset - Returns the maximal possible offset which can 328 /// be used for loads / stores from the global. 329 virtual unsigned getMaximalGlobalOffset() const; 330 331 /// createFastISel - This method returns a target specific FastISel object, 332 /// or null if the target does not support "fast" ISel. 333 virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const; 334 335 Sched::Preference getSchedulingPreference(SDNode *N) const; 336 337 bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const; 338 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 339 340 /// isFPImmLegal - Returns true if the target can instruction select the 341 /// specified FP immediate natively. If false, the legalizer will 342 /// materialize the FP immediate as a load from a constant pool. 343 virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; 344 345 virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, 346 const CallInst &I, 347 unsigned Intrinsic) const; 348 protected: 349 std::pair<const TargetRegisterClass*, uint8_t> 350 findRepresentativeClass(EVT VT) const; 351 352 private: 353 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can 354 /// make the right decision when generating code for different targets. 355 const ARMSubtarget *Subtarget; 356 357 const TargetRegisterInfo *RegInfo; 358 359 const InstrItineraryData *Itins; 360 361 /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created. 362 /// 363 unsigned ARMPCLabelIndex; 364 365 void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT); 366 void addDRTypeForNEON(EVT VT); 367 void addQRTypeForNEON(EVT VT); 368 369 typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector; 370 void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, 371 SDValue Chain, SDValue &Arg, 372 RegsToPassVector &RegsToPass, 373 CCValAssign &VA, CCValAssign &NextVA, 374 SDValue &StackPtr, 375 SmallVector<SDValue, 8> &MemOpChains, 376 ISD::ArgFlagsTy Flags) const; 377 SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 378 SDValue &Root, SelectionDAG &DAG, 379 DebugLoc dl) const; 380 381 CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return, 382 bool isVarArg) const; 383 SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, 384 DebugLoc dl, SelectionDAG &DAG, 385 const CCValAssign &VA, 386 ISD::ArgFlagsTy Flags) const; 387 SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; 388 SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const; 389 SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const; 390 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 391 const ARMSubtarget *Subtarget) const; 392 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; 393 SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const; 394 SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const; 395 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; 396 SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 397 SelectionDAG &DAG) const; 398 SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA, 399 SelectionDAG &DAG) const; 400 SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const; 401 SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const; 402 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; 403 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; 404 SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const; 405 SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; 406 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; 407 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; 408 SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const; 409 SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const; 410 SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; 411 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 412 const ARMSubtarget *ST) const; 413 414 SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const; 415 416 SDValue LowerCallResult(SDValue Chain, SDValue InFlag, 417 CallingConv::ID CallConv, bool isVarArg, 418 const SmallVectorImpl<ISD::InputArg> &Ins, 419 DebugLoc dl, SelectionDAG &DAG, 420 SmallVectorImpl<SDValue> &InVals) const; 421 422 virtual SDValue 423 LowerFormalArguments(SDValue Chain, 424 CallingConv::ID CallConv, bool isVarArg, 425 const SmallVectorImpl<ISD::InputArg> &Ins, 426 DebugLoc dl, SelectionDAG &DAG, 427 SmallVectorImpl<SDValue> &InVals) const; 428 429 void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 430 DebugLoc dl, SDValue &Chain, unsigned ArgOffset) 431 const; 432 433 void computeRegArea(CCState &CCInfo, MachineFunction &MF, 434 unsigned &VARegSize, unsigned &VARegSaveSize) const; 435 436 virtual SDValue 437 LowerCall(SDValue Chain, SDValue Callee, 438 CallingConv::ID CallConv, bool isVarArg, 439 bool &isTailCall, 440 const SmallVectorImpl<ISD::OutputArg> &Outs, 441 const SmallVectorImpl<SDValue> &OutVals, 442 const SmallVectorImpl<ISD::InputArg> &Ins, 443 DebugLoc dl, SelectionDAG &DAG, 444 SmallVectorImpl<SDValue> &InVals) const; 445 446 /// HandleByVal - Target-specific cleanup for ByVal support. 447 virtual void HandleByVal(CCState *, unsigned &) const; 448 449 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 450 /// for tail call optimization. Targets which want to do tail call 451 /// optimization should implement this function. 452 bool IsEligibleForTailCallOptimization(SDValue Callee, 453 CallingConv::ID CalleeCC, 454 bool isVarArg, 455 bool isCalleeStructRet, 456 bool isCallerStructRet, 457 const SmallVectorImpl<ISD::OutputArg> &Outs, 458 const SmallVectorImpl<SDValue> &OutVals, 459 const SmallVectorImpl<ISD::InputArg> &Ins, 460 SelectionDAG& DAG) const; 461 virtual SDValue 462 LowerReturn(SDValue Chain, 463 CallingConv::ID CallConv, bool isVarArg, 464 const SmallVectorImpl<ISD::OutputArg> &Outs, 465 const SmallVectorImpl<SDValue> &OutVals, 466 DebugLoc dl, SelectionDAG &DAG) const; 467 468 virtual bool isUsedByReturnOnly(SDNode *N) const; 469 470 virtual bool mayBeEmittedAsTailCall(CallInst *CI) const; 471 472 SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 473 SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const; 474 SDValue getVFPCmp(SDValue LHS, SDValue RHS, 475 SelectionDAG &DAG, DebugLoc dl) const; 476 SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const; 477 478 SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const; 479 480 MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI, 481 MachineBasicBlock *BB, 482 unsigned Size) const; 483 MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI, 484 MachineBasicBlock *BB, 485 unsigned Size, 486 unsigned BinOpcode) const; 487 488 }; 489 490 enum NEONModImmType { 491 VMOVModImm, 492 VMVNModImm, 493 OtherModImm 494 }; 495 496 497 namespace ARM { 498 FastISel *createFastISel(FunctionLoweringInfo &funcInfo); 499 } 500} 501 502#endif // ARMISELLOWERING_H 503