ARMISelLowering.h revision 6948897e478cbd66626159776a8017b3c18579b9
//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;
  class ARMSubtarget;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.
      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit unsigned multiply-accumulate
      SMLAL,        // 64-bit signed multiply-accumulate

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  } // end namespace ARMISD
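
  // Illustrative sketch (not part of the original header): custom lowering
  // code in ARMISelLowering.cpp builds these target nodes with
  // SelectionDAG::getNode. For example, a hypothetical helper that splats a
  // scalar into every lane of a vector via ARMISD::VDUP might look like:
  //
  //   static SDValue splatScalar(SDValue Scalar, EVT VecVT, SDLoc dl,
  //                              SelectionDAG &DAG) {
  //     // VDUP replicates its scalar operand across all lanes of VecVT.
  //     return DAG.getNode(ARMISD::VDUP, dl, VecVT, Scalar);
  //   }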
  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  } // end namespace ARM

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(const TargetMachine &TM,
                               const ARMSubtarget &STI);

    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;

    MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const override;

    void AdjustInstrPostInstrSelection(MachineInstr *MI,
                                       SDNode *Node) const override;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

    /// allowsMisalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                        unsigned Align,
                                        bool *Fast) const override;

    EVT getOptimalMemOpType(uint64_t Size,
                            unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset,
                            bool MemcpyStrSrc,
                            MachineFunction &MF) const override;

    using TargetLowering::isZExtFree;
    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const AddrMode &AM, Type *Ty,
                               unsigned AS) const override;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;
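
    // Illustration (assumed, not from the original header): in ARM mode a
    // legal compare immediate is an 8-bit value rotated right by an even
    // amount (or the negation of one, which can use CMN instead), so:
    //
    //   cmp r0, #255      @ legal: fits the modified-immediate encoding
    //   cmp r0, #0xff00   @ legal: 0xff rotated
    //   cmp r0, #257      @ not encodable; 257 must be materialized first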
    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// getPreIndexedAddressParts - Returns true by value; returns the base
    /// pointer, offset pointer, and addressing mode by reference if the
    /// node's address can be legally represented as a pre-indexed load/store
    /// address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// getPostIndexedAddressParts - Returns true by value; returns the base
    /// pointer, offset pointer, and addressing mode by reference if this node
    /// can be combined with a load/store to form a post-indexed load/store.
    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                    SDValue &Offset, ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) const override;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType
      getConstraintType(const std::string &Constraint) const override;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 const std::string &Constraint,
                                 MVT VT) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned getInlineAsmMemConstraint(
        const std::string &ConstraintCode) const override {
      if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'U') {
        switch (ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::Constraint_Um;
        case 'n':
          return InlineAsm::Constraint_Un;
        case 'q':
          return InlineAsm::Constraint_Uq;
        case 's':
          return InlineAsm::Constraint_Us;
        case 't':
          return InlineAsm::Constraint_Ut;
        case 'v':
          return InlineAsm::Constraint_Uv;
        case 'y':
          return InlineAsm::Constraint_Uy;
        }
      }
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
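
    // Example (assumed usage, not from this file): the "Q" constraint asks
    // for a memory operand addressable by a single base register with no
    // offset, which is what exclusive accesses require, e.g. in GCC-style
    // inline assembly:
    //
    //   int val, ret;
    //   __asm__ volatile("ldrex %0, %1" : "=r"(ret) : "Q"(val));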
    const ARMSubtarget *getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    const TargetRegisterClass *getRegClassFor(MVT VT) const override;

    /// Returns true if a cast between SrcAS and DestAS is a noop.
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
      // Addrspacecasts are always noops.
      return true;
    }

    bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                unsigned &PrefAlign) const override;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    bool
    isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

    bool hasLoadLinkedStoreConditional() const override;
    Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                          AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                Value *Addr, AtomicOrdering Ord) const override;

    Instruction *emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                  bool IsStore, bool IsLoad) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                   bool IsStore, bool IsLoad) const override;

    bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicRMWExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
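
    // Sketch (assumed, based on the public ARM intrinsics) of what the
    // load-linked/store-conditional hooks above produce when an i32 atomicrmw
    // is expanded in IR: emitLoadLinked emits a call to @llvm.arm.ldrex and
    // emitStoreConditional a call to @llvm.arm.strex, yielding a retry loop:
    //
    //   loop:
    //     %old  = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
    //     %new  = add i32 %old, %val
    //     %fail = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
    //     %try  = icmp ne i32 %fail, 0
    //     br i1 %try, label %loop, label %done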
    bool useLoadStackGuardNode() const override;

    bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                   unsigned &Cost) const override;

  protected:
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                              SDValue &ARMcc) const;

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;

    CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                            bool isVarArg) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

    unsigned getRegisterByName(const char *RegName, EVT VT) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true; otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter, so it's not clear that
    /// there would be a gain or that the gain would be worthwhile enough to
    /// risk correctness bugs.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }
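
    // Illustration (assumed): because the hook above returns false, IR such as
    //
    //   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
    //
    // is expanded to separate fmul and fadd nodes (e.g. VMUL.F32 + VADD.F32)
    // rather than a single fused multiply-add.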
    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const override;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       int ArgOffset,
                       unsigned ArgSize) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              unsigned TotalArgRegsSaveSize,
                              bool ForceMutable = false) const;

    SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const override;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    void HandleByVal(CCState *, unsigned &, unsigned) const override;
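
    // Example (illustrative, not from this file): for a call such as
    //
    //   declare void @f(%struct.S* byval align 4)
    //
    // HandleByVal reserves a contiguous run of the argument registers r0-r3
    // for the leading bytes of the struct; any remainder is passed on the
    // stack, and StoreByValRegs spills the register portion back to memory
    // when the callee needs an in-memory copy.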
    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const override;

    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(CallInst *CI) const override;

    SDValue getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                    SDValue ARMcc, SDValue CCR, SDValue Cmp,
                    SelectionDAG &DAG) const;
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    void EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitLowered__chkstk(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  } // end namespace ARM
} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H