ARMISelLowering.h revision 268c743a3ba44ada364938bc5ff9b1be219df54f
//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
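      // Illustrative note (not part of the interface; the pairing below is
      // an assumed lowering shape shown for context): SRL_FLAG/SRA_FLAG and
      // RRX cooperate when an i64 shift right by one is lowered, with the
      // carry from shifting the high word rotated into the low word:
      //   lsrs r1, r1, #1   @ high word; carry = bit shifted out
      //   rrx  r0, r0       @ low word; carry shifted into bit 31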
      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,  // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit Unsigned Accumulate Multiply
      SMLAL,        // 64-bit Signed Accumulate Multiply

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
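    // Illustrative examples (assumed semantics, matching how bit-field
    // insert/clear patterns use this predicate): a mask is "inverted" when
    // its zero bits form a single contiguous bit-field, e.g.
    //   isBitFieldInvertedMask(0xffff00ff)  -> true  (zeros in bits 8..15)
    //   isBitFieldInvertedMask(0xff00ff00)  -> false (two separate zero fields)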
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual bool isSelectSupported(SelectSupportKind Kind) const {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
    AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;

    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsMemset, bool ZeroMemset,
                                    bool MemcpyStrSrc,
                                    MachineFunction &MF) const;

    using TargetLowering::isZExtFree;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;
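    // Illustrative note (assumed encoding rules, not part of this
    // interface): in ARM mode these hooks accept the "modified immediate"
    // forms, i.e. an 8-bit value rotated right by an even amount (Thumb2
    // adds a few extra patterns). For example:
    //   isLegalAddImmediate(0x00ff0000)  // true:  0xff rotated right by 16
    //   isLegalAddImmediate(0x00ff00ff)  // false in ARM mode: not encodable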
    /// getPreIndexedAddressParts - Returns true if the node's address can be
    /// legally represented as a pre-indexed load / store address; if so, also
    /// returns the base pointer, offset, and addressing mode by reference.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true if this node can be combined
    /// with a load / store to form a post-indexed load / store; if so, also
    /// returns the base pointer, offset, and addressing mode by reference.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
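    // Illustrative note (instruction forms shown for context only): a
    // pre-indexed access updates the base register before the access, a
    // post-indexed one updates it afterwards, e.g.
    //   ldr r0, [r1, #4]!   @ pre-indexed:  r1 += 4, then load from r1
    //   ldr r0, [r1], #4    @ post-indexed: load from r1, then r1 += 4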
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true, it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;
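    // Illustrative note (assumed AAPCS behavior, shown for context only):
    // under a soft-float calling convention an f64 argument is split across
    // a pair of core registers, which the two helpers above model using the
    // VMOVRRD/VMOVDRR nodes, e.g.
    //   double f(double x);  // x arrives in r0 (low word) and r1 (high word)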
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that there
    /// would be a gain or that the gain would be worthwhile enough to risk
    /// correctness bugs.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              bool ForceMutable = false) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned ArgSize,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;

    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &, unsigned) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    virtual bool CanLowerReturn(CallingConv::ID CallConv,
                                MachineFunction &MF, bool isVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                LLVMContext &Context) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1,
                                          unsigned Op2,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false,
                                          bool IsMinMax = false,
                                          ARMCC::CondCodes CC = ARMCC::AL) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;
    MachineBasicBlock *EmitAtomicLoad64(MachineInstr *MI,
                                        MachineBasicBlock *BB) const;
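    // Illustrative note (assumed expansion shape, shown for context only):
    // the EmitAtomic* helpers above expand atomic pseudo-instructions into
    // exclusive load/store retry loops along the lines of
    //   loop:
    //     ldrex   r2, [r0]        @ exclusive load of the current value
    //     add     r3, r2, r1      @ apply the binary operation
    //     strex   r12, r3, [r0]   @ try to store; r12 = 0 on success
    //     cmp     r12, #0
    //     bne     loop            @ retry if the reservation was lost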
    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif  // ARMISELLOWERING_H