//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Target/TargetRegisterInfo.h"

namespace llvm {

class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MachineRegisterInfo;
class MDNode;
class MCInst;
struct MCSchedModel;
class MCSymbolRefExpr;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSubtargetInfo;
class TargetSchedModel;
class DFAPacketizer;

template<class T> class SmallVectorImpl;

//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
  TargetInstrInfo(const TargetInstrInfo &) = delete;
  void operator=(const TargetInstrInfo &) = delete;
public:
  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
      : CallFrameSetupOpcode(CFSetupOpcode),
        CallFrameDestroyOpcode(CFDestroyOpcode),
        CatchRetOpcode(CatchRetOpcode),
        ReturnOpcode(ReturnOpcode) {}

  virtual ~TargetInstrInfo();

  /// Returns true for opcodes in the target-independent (generic) range,
  /// i.e. opcodes shared by all targets.
  static bool isGenericOpcode(unsigned Opc) {
    return Opc <= TargetOpcode::GENERIC_OP_END;
  }

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
                                         unsigned OpNum,
                                         const TargetRegisterInfo *TRI,
                                         const MachineFunction &MF) const;

  /// Return true if the instruction is trivially rematerializable, meaning it
  /// has no side effects and requires no operands that aren't always available.
  /// This means the only allowed uses are constants and unallocatable physical
  /// registers so that the instruction's result is independent of the place
  /// in the function.
  bool isTriviallyReMaterializable(const MachineInstr &MI,
                                   AliasAnalysis *AA = nullptr) const {
    return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
           (MI.getDesc().isRematerializable() &&
            (isReallyTriviallyReMaterializable(MI, AA) ||
             isReallyTriviallyReMaterializableGeneric(MI, AA)));
  }

protected:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set, this hook lets the target specify whether the instruction is actually
  /// trivially rematerializable, taking into consideration its operands. This
  /// predicate must return false if the instruction has any side effects other
  /// than producing a value, or if it requires any address registers that are
  /// not always available.
  /// Requirements must be checked as stated in isTriviallyReMaterializable().
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                 AliasAnalysis *AA) const {
    return false;
  }

  /// This method commutes the operands of the given machine instruction MI.
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2.
  ///
  /// If a target has any instructions that are commutable but require
  /// converting to different instructions or making non-trivial changes
  /// to commute them, this method can be overloaded to do that.
  /// The default implementation simply swaps the commutable operands.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                               unsigned OpIdx1,
                                               unsigned OpIdx2) const;

  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
  /// operand indices to (ResultIdx1, ResultIdx2).
  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
  /// predefined to some indices or be undefined (designated by the special
  /// value 'CommuteAnyOperandIndex').
  /// The predefined result indices cannot be re-defined.
  /// The function returns true iff after the result pair redefinition
  /// the fixed result pair is equal to or equivalent to the source pair of
  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
  /// the pairs (x,y) and (y,x) are equivalent.
  static bool fixCommutedOpIndices(unsigned &ResultIdx1,
                                   unsigned &ResultIdx2,
                                   unsigned CommutableOpIdx1,
                                   unsigned CommutableOpIdx2);

private:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set and the target hook isReallyTriviallyReMaterializable returns false,
  /// this function does target-independent tests to determine if the
  /// instruction is really trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
                                                AliasAnalysis *AA) const;

public:
  /// These methods return the opcode of the frame setup/destroy instructions
  /// if they exist (-1 otherwise). Some targets use pseudo instructions in
  /// order to abstract away the difference between operating with a frame
  /// pointer and operating without, through the use of these two instructions.
  ///
  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// Returns true if the argument is a frame pseudo instruction.
  bool isFrameInstr(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode() ||
           I.getOpcode() == getCallFrameDestroyOpcode();
  }

  /// Returns true if the argument is a frame setup pseudo instruction.
  bool isFrameSetup(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode();
  }

  /// Returns size of the frame associated with the given frame instruction.
  /// For a frame setup instruction this is the frame space set up after
  /// the instruction. For a frame destroy instruction this is the frame
  /// freed by the caller.
  /// Note, in some cases a call frame (or a part of it) may be prepared prior
  /// to the frame setup instruction. It occurs in the calls that involve
  /// inalloca arguments. This function reports only the size of the frame part
  /// that is set up between the frame setup and destroy pseudo instructions.
  int64_t getFrameSize(const MachineInstr &I) const {
    assert(isFrameInstr(I) && "Not a frame instruction");
    assert(I.getOperand(0).getImm() >= 0);
    return I.getOperand(0).getImm();
  }

  /// Returns the total frame size, which is made up of the space set up inside
  /// the pair of frame start-stop instructions and the space that is set up
  /// prior to the pair.
  int64_t getFrameTotalSize(const MachineInstr &I) const {
    if (isFrameSetup(I)) {
      // Operand 1 of a frame setup instruction holds the out-of-pair
      // (e.g. inalloca) portion of the frame.
      assert(I.getOperand(1).getImm() >= 0 &&
             "Frame size must not be negative");
      return getFrameSize(I) + I.getOperand(1).getImm();
    }
    return getFrameSize(I);
  }

  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
  unsigned getReturnOpcode() const { return ReturnOpcode; }

  /// Returns the actual stack pointer adjustment made by an instruction
  /// as part of a call sequence. By default, only call frame setup/destroy
  /// instructions adjust the stack, but targets may want to override this
  /// to enable more fine-grained adjustment, or adjust by a different value.
  virtual int getSPAdjust(const MachineInstr &MI) const;

  /// Return true if the instruction is a "coalescable" extension instruction.
  /// That is, it's like a copy where it's legal for the source to overlap the
  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
  /// expected the pre-extension value is available as a subreg of the result
  /// register. This also returns the sub-register index in SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
                                     unsigned &SrcReg, unsigned &DstReg,
                                     unsigned &SubIdx) const {
    return false;
  }

  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic so it isn't reliable for correctness.
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a load from a stack slot,
  /// return true along with the FrameIndex of the loaded stack slot and the
  /// machine mem operand containing the reference.
  /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
  /// any instruction that loads from the stack. This is just a hint, as some
  /// cases may be missed.
  virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
                                    const MachineMemOperand *&MMO,
                                    int &FrameIndex) const;

  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot stored to. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a store to a stack slot,
  /// return true along with the FrameIndex of the stack slot stored to and
  /// the machine mem operand containing the reference.
  /// If not, return false. Unlike isStoreToStackSlot,
  /// this returns true for any instruction that stores to the
  /// stack. This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(const MachineInstr &MI,
                                   const MachineMemOperand *&MMO,
                                   int &FrameIndex) const;

  /// Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }

  /// Compute the size in bytes and offset within a stack slot of a spilled
  /// register or subregister.
  ///
  /// \param [out] Size in bytes of the spilled value.
  /// \param [out] Offset in bytes within the stack slot.
  /// \returns true if both Size and Offset are successfully computed.
  ///
  /// Not all subregisters have computable spill slots. For example,
  /// subregisters may not be byte-sized, and a pair of discontiguous
  /// subregisters has no single offset.
  ///
  /// Targets with nontrivial bigendian implementations may need to override
  /// this, particularly to support spilled vector registers.
  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
                                 unsigned &Size, unsigned &Offset,
                                 const MachineFunction &MF) const;

  /// Returns the size in bytes of the specified MachineInstr, or ~0U
  /// when this function is not implemented by a target.
  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
    return ~0U;
  }

  /// Return true if the instruction is as cheap as a move instruction.
  ///
  /// Targets for different archs need to override this, and different
  /// micro-architectures can also be finely tuned inside.
  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
    return MI.isAsCheapAsAMove();
  }

  /// Return true if the instruction should be sunk by MachineSink.
  ///
  /// MachineSink determines on its own whether the instruction is safe to sink;
  /// this gives the target a hook to override the default behavior with regards
  /// to which instructions should be sunk.
  virtual bool shouldSink(const MachineInstr &MI) const {
    return true;
  }

  /// Re-issue the specified 'original' instruction at the
  /// specific location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI, unsigned DestReg,
                             unsigned SubIdx, const MachineInstr &Orig,
                             const TargetRegisterInfo &TRI) const;

  /// Create a duplicate of the Orig instruction in MF. This is like
  /// MachineFunction::CloneMachineInstr(), but the target may update operands
  /// that are required to be unique.
  ///
  /// The instruction must be duplicable as indicated by isNotDuplicable().
  virtual MachineInstr *duplicate(MachineInstr &Orig,
                                  MachineFunction &MF) const;

  /// This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                              MachineInstr &MI,
                                              LiveVariables *LV) const {
    return nullptr;
  }

  // This constant can be used as an input value of operand index passed to
  // the method findCommutedOpIndices() to tell the method that the
  // corresponding operand index is not pre-defined and that the method
  // can pick any commutable operand.
  static const unsigned CommuteAnyOperandIndex = ~0U;

  /// This method commutes the operands of the given machine instruction MI.
  ///
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
  /// any arbitrarily chosen commutable operand. If both arguments are set to
  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
  /// operands; then commutes them if such operands could be found.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// for non-commutable operands.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  MachineInstr *
  commuteInstruction(MachineInstr &MI, bool NewMI = false,
                     unsigned OpIdx1 = CommuteAnyOperandIndex,
                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
  /// If any of the INPUT values is set to the special value
  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
  /// operand, then returns its index in the corresponding argument.
  /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
  /// looks for 2 commutable operands.
  /// If INPUT values refer to some operands of MI, then the method simply
  /// returns true if the corresponding operands are commutable and returns
  /// false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with the operand#1.
  virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;

  /// A pair composed of a register and a sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg.
  struct RegSubRegPair {
    unsigned Reg;
    unsigned SubReg;
    RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
        : Reg(Reg), SubReg(SubReg) {}
  };

  /// A pair composed of a pair of a register and a sub-register index,
  /// and another sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
  struct RegSubRegPairAndIdx : RegSubRegPair {
    unsigned SubIdx;
    RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
                        unsigned SubIdx = 0)
        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
  };

  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>.
  /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
  /// two elements:
  /// - vreg1:sub1, sub0
  /// - vreg2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isRegSequenceLike(). In other words, one has to override
  /// getRegSequenceLikeInputs for target specific instructions.
  bool
  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;

  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
  /// - vreg1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isExtractSubregLike(). In other words, one has to override
  /// getExtractSubregLikeInputs for target specific instructions.
  bool
  getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                         RegSubRegPairAndIdx &InputReg) const;

  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
  /// - BaseReg: vreg0:sub0
  /// - InsertedReg: vreg1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isInsertSubregLike(). In other words, one has to override
  /// getInsertSubregLikeInputs for target specific instructions.
  bool
  getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                        RegSubRegPair &BaseReg,
                        RegSubRegPairAndIdx &InsertedReg) const;

  /// Return true if two machine instructions would produce identical values.
  /// By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr &MI0,
                                const MachineInstr &MI1,
                                const MachineRegisterInfo *MRI = nullptr) const;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                     int64_t BrOffset) const {
    llvm_unreachable("target did not implement");
  }

  /// \returns The block that branch instruction \p MI jumps to.
  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
    llvm_unreachable("target did not implement");
  }

  /// Insert an unconditional indirect branch at the end of \p MBB to \p
  /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
  /// the offset of the position to insert the new branch.
  ///
  /// \returns The number of bytes added to the block.
  virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock &NewDestBB,
                                        const DebugLoc &DL,
                                        int64_t BrOffset = 0,
                                        RegScavenger *RS = nullptr) const {
    llvm_unreachable("target did not implement");
  }

  /// Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target). Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its succ)
  ///    just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and a
  ///    list of operands that evaluate the condition. These operands can be
  ///    passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition. These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that removeBranch and insertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }

  /// Represents a predicate at the MachineFunction level. The control flow a
  /// MachineBranchPredicate represents is:
  ///
  ///  Reg <def>= LHS `Predicate` RHS           == ConditionDef
  ///  if Reg then goto TrueDest else goto FalseDest
  ///
  struct MachineBranchPredicate {
    enum ComparePredicate {
      PRED_EQ,     // True if two values are equal
      PRED_NE,     // True if two values are not equal
      PRED_INVALID // Sentinel value
    };

    ComparePredicate Predicate;
    MachineOperand LHS;
    MachineOperand RHS;
    MachineBasicBlock *TrueDest;
    MachineBasicBlock *FalseDest;
    MachineInstr *ConditionDef;

    /// SingleUseCondition is true if ConditionDef is dead except for the
    /// branch(es) at the end of the basic block.
    ///
    bool SingleUseCondition;

    explicit MachineBranchPredicate()
        : Predicate(PRED_INVALID), LHS(MachineOperand::CreateImm(0)),
          RHS(MachineOperand::CreateImm(0)), TrueDest(nullptr),
          FalseDest(nullptr), ConditionDef(nullptr), SingleUseCondition(false) {
    }
  };

  /// Analyze the branching code at the end of MBB and parse it into the
  /// MachineBranchPredicate structure if possible. Returns false on success
  /// and true on failure.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                                      MachineBranchPredicate &MBP,
                                      bool AllowModify = false) const {
    return true;
  }

  /// Remove the branching code at the end of the specific MBB.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  /// If \p BytesRemoved is non-null, report the change in code size from the
  /// removed instructions.
  virtual unsigned removeBranch(MachineBasicBlock &MBB,
                                int *BytesRemoved = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
  }

  /// Insert branch code into the end of the specified MachineBasicBlock. The
  /// operands to this method are the same as those returned by AnalyzeBranch.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
  /// report the change in code size from the added instructions.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where AnalyzeBranch doesn't apply because there was no original
  /// branch to analyze. At least this much must be implemented, else tail
  /// merging needs to be disabled.
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                ArrayRef<MachineOperand> Cond,
                                const DebugLoc &DL,
                                int *BytesAdded = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
  }

  /// Convenience wrapper around insertBranch that inserts a single
  /// unconditional branch from \p MBB to \p DestBB.
  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *DestBB,
                                     const DebugLoc &DL,
                                     int *BytesAdded = nullptr) const {
    return insertBranch(MBB, DestBB, nullptr,
                        ArrayRef<MachineOperand>(), DL, BytesAdded);
  }

  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }

  /// Generate code to reduce the loop iteration by one and check if the loop is
  /// finished. Return the value/register of the new loop count. We need
  /// this function when peeling off one or more iterations of a loop. This
  /// function assumes the nth iteration is peeled first.
  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
                                   MachineInstr *IndVar, MachineInstr &Cmp,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
                                   unsigned Iter, unsigned MaxIter) const {
    llvm_unreachable("Target didn't implement ReduceLoopCount");
  }

  /// Delete the instruction OldInst and everything after it, replacing it with
  /// an unconditional branch to NewDest. This is used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const;

  /// Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. instruction would be the start
  /// of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  virtual
  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                           unsigned ExtraPredCycles,
                           BranchProbability Probability) const {
    return false;
  }

  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from true and false path
  /// of a if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool
  isProfitableToIfCvt(MachineBasicBlock &TMBB,
                      unsigned NumTCycles, unsigned ExtraTCycles,
                      MachineBasicBlock &FMBB,
                      unsigned NumFCycles, unsigned ExtraFCycles,
                      BranchProbability Probability) const {
    return false;
  }

  /// Return true if it's profitable for if-converter to duplicate instructions
  /// of specified accumulated instruction latencies in the specified MBB to
  /// enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool
  isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                            BranchProbability Probability) const {
    return false;
  }

  /// Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
  /// exclusive predicates.
  /// e.g.
  ///   subeq  r0, r1, #1
  ///   addne  r0, r1, #1
  /// =>
  ///   sub    r0, r1, #1
  ///   addne  r0, r1, #1
  ///
  /// This may be profitable if conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }

  /// Return true if it is possible to insert a select
  /// instruction that chooses between TrueReg and FalseReg based on the
  /// condition code in Cond.
  ///
  /// When successful, also return the latency in cycles from TrueReg,
  /// FalseReg, and Cond to the destination register. In most cases, a select
  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
  ///
  /// Some x86 implementations have 2-cycle cmov instructions.
  ///
  /// @param MBB         Block where select instruction would be inserted.
  /// @param Cond        Condition returned by AnalyzeBranch.
  /// @param TrueReg     Virtual register to select when Cond is true.
  /// @param FalseReg    Virtual register to select when Cond is false.
  /// @param CondCycles  Latency from Cond+Branch to select output.
  /// @param TrueCycles  Latency from TrueReg to select output.
  /// @param FalseCycles Latency from FalseReg to select output.
  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
                               ArrayRef<MachineOperand> Cond,
                               unsigned TrueReg, unsigned FalseReg,
                               int &CondCycles,
                               int &TrueCycles, int &FalseCycles) const {
    return false;
  }

  /// Insert a select instruction into MBB before I that will copy TrueReg to
  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by select instruction.
  /// @param Cond     Condition as computed by AnalyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            unsigned DstReg, ArrayRef<MachineOperand> Cond,
                            unsigned TrueReg, unsigned FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }

  /// Analyze the given select instruction, returning true if
  /// it cannot be understood. It is assumed that MI->isSelect() is true.
  ///
  /// When successful, return the controlling condition and the operands that
  /// determine the true and false result values.
  ///
  ///   Result = SELECT Cond, TrueOp, FalseOp
  ///
  /// Some targets can optimize select instructions, for example by predicating
  /// the instruction defining one of the operands. Such targets should set
  /// Optimizable.
  ///
  /// @param MI          Select instruction to analyze.
  /// @param Cond        Condition controlling the select.
  /// @param TrueOp      Operand number of the value selected when Cond is true.
  /// @param FalseOp     Operand number of the value selected when Cond is
  ///                    false.
  /// @param Optimizable Returned as true if MI is optimizable.
  /// @returns False on success.
778 virtual bool analyzeSelect(const MachineInstr &MI, 779 SmallVectorImpl<MachineOperand> &Cond, 780 unsigned &TrueOp, unsigned &FalseOp, 781 bool &Optimizable) const { 782 assert(MI.getDesc().isSelect() && "MI must be a select instruction"); 783 return true; 784 } 785 786 /// Given a select instruction that was understood by 787 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by 788 /// merging it with one of its operands. Returns NULL on failure. 789 /// 790 /// When successful, returns the new select instruction. The client is 791 /// responsible for deleting MI. 792 /// 793 /// If both sides of the select can be optimized, PreferFalse is used to pick 794 /// a side. 795 /// 796 /// @param MI Optimizable select instruction. 797 /// @param NewMIs Set that record all MIs in the basic block up to \p 798 /// MI. Has to be updated with any newly created MI or deleted ones. 799 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp. 800 /// @returns Optimized instruction or NULL. 801 virtual MachineInstr *optimizeSelect(MachineInstr &MI, 802 SmallPtrSetImpl<MachineInstr *> &NewMIs, 803 bool PreferFalse = false) const { 804 // This function must be implemented if Optimizable is ever set. 805 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!"); 806 } 807 808 /// Emit instructions to copy a pair of physical registers. 809 /// 810 /// This function should support copies within any legal register class as 811 /// well as any cross-class copies created during instruction selection. 812 /// 813 /// The source and destination registers may overlap, which may require a 814 /// careful implementation when multiple copy instructions are required for 815 /// large registers. See for example the ARM target. 
816 virtual void copyPhysReg(MachineBasicBlock &MBB, 817 MachineBasicBlock::iterator MI, const DebugLoc &DL, 818 unsigned DestReg, unsigned SrcReg, 819 bool KillSrc) const { 820 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); 821 } 822 823 /// Store the specified register of the given register class to the specified 824 /// stack frame index. The store instruction is to be added to the given 825 /// machine basic block before the specified machine instruction. If isKill 826 /// is true, the register operand is the last use and must be marked kill. 827 virtual void storeRegToStackSlot(MachineBasicBlock &MBB, 828 MachineBasicBlock::iterator MI, 829 unsigned SrcReg, bool isKill, int FrameIndex, 830 const TargetRegisterClass *RC, 831 const TargetRegisterInfo *TRI) const { 832 llvm_unreachable("Target didn't implement " 833 "TargetInstrInfo::storeRegToStackSlot!"); 834 } 835 836 /// Load the specified register of the given register class from the specified 837 /// stack frame index. The load instruction is to be added to the given 838 /// machine basic block before the specified machine instruction. 839 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, 840 MachineBasicBlock::iterator MI, 841 unsigned DestReg, int FrameIndex, 842 const TargetRegisterClass *RC, 843 const TargetRegisterInfo *TRI) const { 844 llvm_unreachable("Target didn't implement " 845 "TargetInstrInfo::loadRegFromStackSlot!"); 846 } 847 848 /// This function is called for all pseudo instructions 849 /// that remain after register allocation. Many pseudo instructions are 850 /// created to help register allocation. This is the place to convert them 851 /// into real instructions. The target can edit MI in place, or it can insert 852 /// new instructions and erase MI. The function should return true if 853 /// anything was changed. 
854 virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; } 855 856 /// Check whether the target can fold a load that feeds a subreg operand 857 /// (or a subreg operand that feeds a store). 858 /// For example, X86 may want to return true if it can fold 859 /// movl (%esp), %eax 860 /// subb, %al, ... 861 /// Into: 862 /// subb (%esp), ... 863 /// 864 /// Ideally, we'd like the target implementation of foldMemoryOperand() to 865 /// reject subregs - but since this behavior used to be enforced in the 866 /// target-independent code, moving this responsibility to the targets 867 /// has the potential of causing nasty silent breakage in out-of-tree targets. 868 virtual bool isSubregFoldable() const { return false; } 869 870 /// Attempt to fold a load or store of the specified stack 871 /// slot into the specified machine instruction for the specified operand(s). 872 /// If this is possible, a new instruction is returned with the specified 873 /// operand folded, otherwise NULL is returned. 874 /// The new instruction is inserted before MI, and the client is responsible 875 /// for removing the old instruction. 876 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, 877 int FrameIndex, 878 LiveIntervals *LIS = nullptr) const; 879 880 /// Same as the previous version except it allows folding of any load and 881 /// store from / to any address, not just from a specific stack slot. 882 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, 883 MachineInstr &LoadMI, 884 LiveIntervals *LIS = nullptr) const; 885 886 /// Return true when there is potentially a faster code sequence 887 /// for an instruction chain ending in \p Root. All potential patterns are 888 /// returned in the \p Pattern vector. Pattern should be sorted in priority 889 /// order since the pattern evaluator stops checking as soon as it finds a 890 /// faster sequence. 
891 /// \param Root - Instruction that could be combined with one of its operands 892 /// \param Patterns - Vector of possible combination patterns 893 virtual bool getMachineCombinerPatterns( 894 MachineInstr &Root, 895 SmallVectorImpl<MachineCombinerPattern> &Patterns) const; 896 897 /// Return true when a code sequence can improve throughput. It 898 /// should be called only for instructions in loops. 899 /// \param Pattern - combiner pattern 900 virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const; 901 902 /// Return true if the input \P Inst is part of a chain of dependent ops 903 /// that are suitable for reassociation, otherwise return false. 904 /// If the instruction's operands must be commuted to have a previous 905 /// instruction of the same type define the first source operand, \P Commuted 906 /// will be set to true. 907 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const; 908 909 /// Return true when \P Inst is both associative and commutative. 910 virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const { 911 return false; 912 } 913 914 /// Return true when \P Inst has reassociable operands in the same \P MBB. 915 virtual bool hasReassociableOperands(const MachineInstr &Inst, 916 const MachineBasicBlock *MBB) const; 917 918 /// Return true when \P Inst has reassociable sibling. 919 bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const; 920 921 /// When getMachineCombinerPatterns() finds patterns, this function generates 922 /// the instructions that could replace the original code sequence. The client 923 /// has to decide whether the actual replacement is beneficial or not. 
924 /// \param Root - Instruction that could be combined with one of its operands 925 /// \param Pattern - Combination pattern for Root 926 /// \param InsInstrs - Vector of new instructions that implement P 927 /// \param DelInstrs - Old instructions, including Root, that could be 928 /// replaced by InsInstr 929 /// \param InstrIdxForVirtReg - map of virtual register to instruction in 930 /// InsInstr that defines it 931 virtual void genAlternativeCodeSequence( 932 MachineInstr &Root, MachineCombinerPattern Pattern, 933 SmallVectorImpl<MachineInstr *> &InsInstrs, 934 SmallVectorImpl<MachineInstr *> &DelInstrs, 935 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const; 936 937 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to 938 /// reduce critical path length. 939 void reassociateOps(MachineInstr &Root, MachineInstr &Prev, 940 MachineCombinerPattern Pattern, 941 SmallVectorImpl<MachineInstr *> &InsInstrs, 942 SmallVectorImpl<MachineInstr *> &DelInstrs, 943 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const; 944 945 /// This is an architecture-specific helper function of reassociateOps. 946 /// Set special operand attributes for new instructions after reassociation. 947 virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, 948 MachineInstr &NewMI1, 949 MachineInstr &NewMI2) const { 950 } 951 952 /// Return true when a target supports MachineCombiner. 953 virtual bool useMachineCombiner() const { return false; } 954 955protected: 956 /// Target-dependent implementation for foldMemoryOperand. 957 /// Target-independent code in foldMemoryOperand will 958 /// take care of adding a MachineMemOperand to the newly created instruction. 959 /// The instruction and any auxiliary instructions necessary will be inserted 960 /// at InsertPt. 
961 virtual MachineInstr * 962 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, 963 ArrayRef<unsigned> Ops, 964 MachineBasicBlock::iterator InsertPt, int FrameIndex, 965 LiveIntervals *LIS = nullptr) const { 966 return nullptr; 967 } 968 969 /// Target-dependent implementation for foldMemoryOperand. 970 /// Target-independent code in foldMemoryOperand will 971 /// take care of adding a MachineMemOperand to the newly created instruction. 972 /// The instruction and any auxiliary instructions necessary will be inserted 973 /// at InsertPt. 974 virtual MachineInstr *foldMemoryOperandImpl( 975 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 976 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, 977 LiveIntervals *LIS = nullptr) const { 978 return nullptr; 979 } 980 981 /// \brief Target-dependent implementation of getRegSequenceInputs. 982 /// 983 /// \returns true if it is possible to build the equivalent 984 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise. 985 /// 986 /// \pre MI.isRegSequenceLike(). 987 /// 988 /// \see TargetInstrInfo::getRegSequenceInputs. 989 virtual bool getRegSequenceLikeInputs( 990 const MachineInstr &MI, unsigned DefIdx, 991 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const { 992 return false; 993 } 994 995 /// \brief Target-dependent implementation of getExtractSubregInputs. 996 /// 997 /// \returns true if it is possible to build the equivalent 998 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. 999 /// 1000 /// \pre MI.isExtractSubregLike(). 1001 /// 1002 /// \see TargetInstrInfo::getExtractSubregInputs. 1003 virtual bool getExtractSubregLikeInputs( 1004 const MachineInstr &MI, unsigned DefIdx, 1005 RegSubRegPairAndIdx &InputReg) const { 1006 return false; 1007 } 1008 1009 /// \brief Target-dependent implementation of getInsertSubregInputs. 
1010 /// 1011 /// \returns true if it is possible to build the equivalent 1012 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. 1013 /// 1014 /// \pre MI.isInsertSubregLike(). 1015 /// 1016 /// \see TargetInstrInfo::getInsertSubregInputs. 1017 virtual bool 1018 getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, 1019 RegSubRegPair &BaseReg, 1020 RegSubRegPairAndIdx &InsertedReg) const { 1021 return false; 1022 } 1023 1024public: 1025 /// unfoldMemoryOperand - Separate a single instruction which folded a load or 1026 /// a store or a load and a store into two or more instruction. If this is 1027 /// possible, returns true as well as the new instructions by reference. 1028 virtual bool 1029 unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, 1030 bool UnfoldLoad, bool UnfoldStore, 1031 SmallVectorImpl<MachineInstr *> &NewMIs) const { 1032 return false; 1033 } 1034 1035 virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 1036 SmallVectorImpl<SDNode*> &NewNodes) const { 1037 return false; 1038 } 1039 1040 /// Returns the opcode of the would be new 1041 /// instruction after load / store are unfolded from an instruction of the 1042 /// specified opcode. It returns zero if the specified unfolding is not 1043 /// possible. If LoadRegIndex is non-null, it is filled in with the operand 1044 /// index of the operand which will hold the register holding the loaded 1045 /// value. 1046 virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, 1047 bool UnfoldLoad, bool UnfoldStore, 1048 unsigned *LoadRegIndex = nullptr) const { 1049 return 0; 1050 } 1051 1052 /// This is used by the pre-regalloc scheduler to determine if two loads are 1053 /// loading from the same base address. It should only return true if the base 1054 /// pointers are the same and the only differences between the two addresses 1055 /// are the offset. It also returns the offsets by reference. 
1056 virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 1057 int64_t &Offset1, int64_t &Offset2) const { 1058 return false; 1059 } 1060 1061 /// This is a used by the pre-regalloc scheduler to determine (in conjunction 1062 /// with areLoadsFromSameBasePtr) if two loads should be scheduled together. 1063 /// On some targets if two loads are loading from 1064 /// addresses in the same cache line, it's better if they are scheduled 1065 /// together. This function takes two integers that represent the load offsets 1066 /// from the common base address. It returns true if it decides it's desirable 1067 /// to schedule the two loads together. "NumLoads" is the number of loads that 1068 /// have already been scheduled after Load1. 1069 virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 1070 int64_t Offset1, int64_t Offset2, 1071 unsigned NumLoads) const { 1072 return false; 1073 } 1074 1075 /// Get the base register and byte offset of an instruction that reads/writes 1076 /// memory. 1077 virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg, 1078 int64_t &Offset, 1079 const TargetRegisterInfo *TRI) const { 1080 return false; 1081 } 1082 1083 /// Return true if the instruction contains a base register and offset. If 1084 /// true, the function also sets the operand position in the instruction 1085 /// for the base register and offset. 1086 virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, 1087 unsigned &BasePos, 1088 unsigned &OffsetPos) const { 1089 return false; 1090 } 1091 1092 /// If the instruction is an increment of a constant value, return the amount. 1093 virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const { 1094 return false; 1095 } 1096 1097 /// Returns true if the two given memory operations should be scheduled 1098 /// adjacent. 
Note that you have to add: 1099 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 1100 /// or 1101 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 1102 /// to TargetPassConfig::createMachineScheduler() to have an effect. 1103 virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, 1104 MachineInstr &SecondLdSt, 1105 unsigned NumLoads) const { 1106 llvm_unreachable("target did not implement shouldClusterMemOps()"); 1107 } 1108 1109 /// Reverses the branch condition of the specified condition list, 1110 /// returning false on success and true if it cannot be reversed. 1111 virtual 1112 bool reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 1113 return true; 1114 } 1115 1116 /// Insert a noop into the instruction stream at the specified point. 1117 virtual void insertNoop(MachineBasicBlock &MBB, 1118 MachineBasicBlock::iterator MI) const; 1119 1120 1121 /// Return the noop instruction to use for a noop. 1122 virtual void getNoop(MCInst &NopInst) const; 1123 1124 /// Return true for post-incremented instructions. 1125 virtual bool isPostIncrement(const MachineInstr &MI) const { 1126 return false; 1127 } 1128 1129 /// Returns true if the instruction is already predicated. 1130 virtual bool isPredicated(const MachineInstr &MI) const { 1131 return false; 1132 } 1133 1134 /// Returns true if the instruction is a 1135 /// terminator instruction that has not been predicated. 1136 virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const; 1137 1138 /// Returns true if MI is an unconditional tail call. 1139 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const { 1140 return false; 1141 } 1142 1143 /// Returns true if the tail call can be made conditional on BranchCond. 
1144 virtual bool 1145 canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond, 1146 const MachineInstr &TailCall) const { 1147 return false; 1148 } 1149 1150 /// Replace the conditional branch in MBB with a conditional tail call. 1151 virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, 1152 SmallVectorImpl<MachineOperand> &Cond, 1153 const MachineInstr &TailCall) const { 1154 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!"); 1155 } 1156 1157 /// Convert the instruction into a predicated instruction. 1158 /// It returns true if the operation was successful. 1159 virtual bool PredicateInstruction(MachineInstr &MI, 1160 ArrayRef<MachineOperand> Pred) const; 1161 1162 /// Returns true if the first specified predicate 1163 /// subsumes the second, e.g. GE subsumes GT. 1164 virtual 1165 bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1, 1166 ArrayRef<MachineOperand> Pred2) const { 1167 return false; 1168 } 1169 1170 /// If the specified instruction defines any predicate 1171 /// or condition code register(s) used for predication, returns true as well 1172 /// as the definition predicate(s) by reference. 1173 virtual bool DefinesPredicate(MachineInstr &MI, 1174 std::vector<MachineOperand> &Pred) const { 1175 return false; 1176 } 1177 1178 /// Return true if the specified instruction can be predicated. 1179 /// By default, this returns true for every instruction with a 1180 /// PredicateOperand. 1181 virtual bool isPredicable(const MachineInstr &MI) const { 1182 return MI.getDesc().isPredicable(); 1183 } 1184 1185 /// Return true if it's safe to move a machine 1186 /// instruction that defines the specified register class. 1187 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { 1188 return true; 1189 } 1190 1191 /// Test if the given instruction should be considered a scheduling boundary. 1192 /// This primarily includes labels and terminators. 
1193 virtual bool isSchedulingBoundary(const MachineInstr &MI, 1194 const MachineBasicBlock *MBB, 1195 const MachineFunction &MF) const; 1196 1197 /// Measure the specified inline asm to determine an approximation of its 1198 /// length. 1199 virtual unsigned getInlineAsmLength(const char *Str, 1200 const MCAsmInfo &MAI) const; 1201 1202 /// Allocate and return a hazard recognizer to use for this target when 1203 /// scheduling the machine instructions before register allocation. 1204 virtual ScheduleHazardRecognizer* 1205 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, 1206 const ScheduleDAG *DAG) const; 1207 1208 /// Allocate and return a hazard recognizer to use for this target when 1209 /// scheduling the machine instructions before register allocation. 1210 virtual ScheduleHazardRecognizer* 1211 CreateTargetMIHazardRecognizer(const InstrItineraryData*, 1212 const ScheduleDAG *DAG) const; 1213 1214 /// Allocate and return a hazard recognizer to use for this target when 1215 /// scheduling the machine instructions after register allocation. 1216 virtual ScheduleHazardRecognizer* 1217 CreateTargetPostRAHazardRecognizer(const InstrItineraryData*, 1218 const ScheduleDAG *DAG) const; 1219 1220 /// Allocate and return a hazard recognizer to use for by non-scheduling 1221 /// passes. 1222 virtual ScheduleHazardRecognizer* 1223 CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 1224 return nullptr; 1225 } 1226 1227 /// Provide a global flag for disabling the PreRA hazard recognizer that 1228 /// targets may choose to honor. 1229 bool usePreRAHazardRecognizer() const; 1230 1231 /// For a comparison instruction, return the source registers 1232 /// in SrcReg and SrcReg2 if having two register operands, and the value it 1233 /// compares against in CmpValue. Return true if the comparison instruction 1234 /// can be analyzed. 
1235 virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, 1236 unsigned &SrcReg2, int &Mask, int &Value) const { 1237 return false; 1238 } 1239 1240 /// See if the comparison instruction can be converted 1241 /// into something more efficient. E.g., on ARM most instructions can set the 1242 /// flags register, obviating the need for a separate CMP. 1243 virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, 1244 unsigned SrcReg2, int Mask, int Value, 1245 const MachineRegisterInfo *MRI) const { 1246 return false; 1247 } 1248 virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; } 1249 1250 /// Try to remove the load by folding it to a register operand at the use. 1251 /// We fold the load instructions if and only if the 1252 /// def and use are in the same BB. We only look at one load and see 1253 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register 1254 /// defined by the load we are trying to fold. DefMI returns the machine 1255 /// instruction that defines FoldAsLoadDefReg, and the function returns 1256 /// the machine instruction generated due to folding. 1257 virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI, 1258 const MachineRegisterInfo *MRI, 1259 unsigned &FoldAsLoadDefReg, 1260 MachineInstr *&DefMI) const { 1261 return nullptr; 1262 } 1263 1264 /// 'Reg' is known to be defined by a move immediate instruction, 1265 /// try to fold the immediate into the use instruction. 1266 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true, 1267 /// then the caller may assume that DefMI has been erased from its parent 1268 /// block. The caller may assume that it will not be erased by this 1269 /// function otherwise. 
1270 virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 1271 unsigned Reg, MachineRegisterInfo *MRI) const { 1272 return false; 1273 } 1274 1275 /// Return the number of u-operations the given machine 1276 /// instruction will be decoded to on the target cpu. The itinerary's 1277 /// IssueWidth is the number of microops that can be dispatched each 1278 /// cycle. An instruction with zero microops takes no dispatch resources. 1279 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, 1280 const MachineInstr &MI) const; 1281 1282 /// Return true for pseudo instructions that don't consume any 1283 /// machine resources in their current form. These are common cases that the 1284 /// scheduler should consider free, rather than conservatively handling them 1285 /// as instructions with no itinerary. 1286 bool isZeroCost(unsigned Opcode) const { 1287 return Opcode <= TargetOpcode::COPY; 1288 } 1289 1290 virtual int getOperandLatency(const InstrItineraryData *ItinData, 1291 SDNode *DefNode, unsigned DefIdx, 1292 SDNode *UseNode, unsigned UseIdx) const; 1293 1294 /// Compute and return the use operand latency of a given pair of def and use. 1295 /// In most cases, the static scheduling itinerary was enough to determine the 1296 /// operand latency. But it may not be possible for instructions with variable 1297 /// number of defs / uses. 1298 /// 1299 /// This is a raw interface to the itinerary that may be directly overridden 1300 /// by a target. Use computeOperandLatency to get the best estimate of 1301 /// latency. 1302 virtual int getOperandLatency(const InstrItineraryData *ItinData, 1303 const MachineInstr &DefMI, unsigned DefIdx, 1304 const MachineInstr &UseMI, 1305 unsigned UseIdx) const; 1306 1307 /// Compute the instruction latency of a given instruction. 1308 /// If the instruction has higher cost when predicated, it's returned via 1309 /// PredCost. 
1310 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, 1311 const MachineInstr &MI, 1312 unsigned *PredCost = nullptr) const; 1313 1314 virtual unsigned getPredicationCost(const MachineInstr &MI) const; 1315 1316 virtual int getInstrLatency(const InstrItineraryData *ItinData, 1317 SDNode *Node) const; 1318 1319 /// Return the default expected latency for a def based on its opcode. 1320 unsigned defaultDefLatency(const MCSchedModel &SchedModel, 1321 const MachineInstr &DefMI) const; 1322 1323 int computeDefOperandLatency(const InstrItineraryData *ItinData, 1324 const MachineInstr &DefMI) const; 1325 1326 /// Return true if this opcode has high latency to its result. 1327 virtual bool isHighLatencyDef(int opc) const { return false; } 1328 1329 /// Compute operand latency between a def of 'Reg' 1330 /// and a use in the current loop. Return true if the target considered 1331 /// it 'high'. This is used by optimization passes such as machine LICM to 1332 /// determine whether it makes sense to hoist an instruction out even in a 1333 /// high register pressure situation. 1334 virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, 1335 const MachineRegisterInfo *MRI, 1336 const MachineInstr &DefMI, unsigned DefIdx, 1337 const MachineInstr &UseMI, 1338 unsigned UseIdx) const { 1339 return false; 1340 } 1341 1342 /// Compute operand latency of a def of 'Reg'. Return true 1343 /// if the target considered it 'low'. 1344 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, 1345 const MachineInstr &DefMI, 1346 unsigned DefIdx) const; 1347 1348 /// Perform target-specific instruction verification. 1349 virtual bool verifyInstruction(const MachineInstr &MI, 1350 StringRef &ErrInfo) const { 1351 return true; 1352 } 1353 1354 /// Return the current execution domain and bit mask of 1355 /// possible domains for instruction. 
1356 /// 1357 /// Some micro-architectures have multiple execution domains, and multiple 1358 /// opcodes that perform the same operation in different domains. For 1359 /// example, the x86 architecture provides the por, orps, and orpd 1360 /// instructions that all do the same thing. There is a latency penalty if a 1361 /// register is written in one domain and read in another. 1362 /// 1363 /// This function returns a pair (domain, mask) containing the execution 1364 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain 1365 /// function can be used to change the opcode to one of the domains in the 1366 /// bit mask. Instructions whose execution domain can't be changed should 1367 /// return a 0 mask. 1368 /// 1369 /// The execution domain numbers don't have any special meaning except domain 1370 /// 0 is used for instructions that are not associated with any interesting 1371 /// execution domain. 1372 /// 1373 virtual std::pair<uint16_t, uint16_t> 1374 getExecutionDomain(const MachineInstr &MI) const { 1375 return std::make_pair(0, 0); 1376 } 1377 1378 /// Change the opcode of MI to execute in Domain. 1379 /// 1380 /// The bit (1 << Domain) must be set in the mask returned from 1381 /// getExecutionDomain(MI). 1382 virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {} 1383 1384 /// Returns the preferred minimum clearance 1385 /// before an instruction with an unwanted partial register update. 1386 /// 1387 /// Some instructions only write part of a register, and implicitly need to 1388 /// read the other parts of the register. This may cause unwanted stalls 1389 /// preventing otherwise unrelated instructions from executing in parallel in 1390 /// an out-of-order CPU. 1391 /// 1392 /// For example, the x86 instruction cvtsi2ss writes its result to bits 1393 /// [31:0] of the destination xmm register. 
Bits [127:32] are unaffected, so 1394 /// the instruction needs to wait for the old value of the register to become 1395 /// available: 1396 /// 1397 /// addps %xmm1, %xmm0 1398 /// movaps %xmm0, (%rax) 1399 /// cvtsi2ss %rbx, %xmm0 1400 /// 1401 /// In the code above, the cvtsi2ss instruction needs to wait for the addps 1402 /// instruction before it can issue, even though the high bits of %xmm0 1403 /// probably aren't needed. 1404 /// 1405 /// This hook returns the preferred clearance before MI, measured in 1406 /// instructions. Other defs of MI's operand OpNum are avoided in the last N 1407 /// instructions before MI. It should only return a positive value for 1408 /// unwanted dependencies. If the old bits of the defined register have 1409 /// useful values, or if MI is determined to otherwise read the dependency, 1410 /// the hook should return 0. 1411 /// 1412 /// The unwanted dependency may be handled by: 1413 /// 1414 /// 1. Allocating the same register for an MI def and use. That makes the 1415 /// unwanted dependency identical to a required dependency. 1416 /// 1417 /// 2. Allocating a register for the def that has no defs in the previous N 1418 /// instructions. 1419 /// 1420 /// 3. Calling breakPartialRegDependency() with the same arguments. This 1421 /// allows the target to insert a dependency breaking instruction. 1422 /// 1423 virtual unsigned 1424 getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, 1425 const TargetRegisterInfo *TRI) const { 1426 // The default implementation returns 0 for no partial register dependency. 1427 return 0; 1428 } 1429 1430 /// \brief Return the minimum clearance before an instruction that reads an 1431 /// unused register. 1432 /// 1433 /// For example, AVX instructions may copy part of a register operand into 1434 /// the unused high bits of the destination register. 
1435 /// 1436 /// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14 1437 /// 1438 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a 1439 /// false dependence on any previous write to %xmm0. 1440 /// 1441 /// This hook works similarly to getPartialRegUpdateClearance, except that it 1442 /// does not take an operand index. Instead sets \p OpNum to the index of the 1443 /// unused register. 1444 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum, 1445 const TargetRegisterInfo *TRI) const { 1446 // The default implementation returns 0 for no undef register dependency. 1447 return 0; 1448 } 1449 1450 /// Insert a dependency-breaking instruction 1451 /// before MI to eliminate an unwanted dependency on OpNum. 1452 /// 1453 /// If it wasn't possible to avoid a def in the last N instructions before MI 1454 /// (see getPartialRegUpdateClearance), this hook will be called to break the 1455 /// unwanted dependency. 1456 /// 1457 /// On x86, an xorps instruction can be used as a dependency breaker: 1458 /// 1459 /// addps %xmm1, %xmm0 1460 /// movaps %xmm0, (%rax) 1461 /// xorps %xmm0, %xmm0 1462 /// cvtsi2ss %rbx, %xmm0 1463 /// 1464 /// An <imp-kill> operand should be added to MI if an instruction was 1465 /// inserted. This ties the instructions together in the post-ra scheduler. 1466 /// 1467 virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, 1468 const TargetRegisterInfo *TRI) const {} 1469 1470 /// Create machine specific model for scheduling. 1471 virtual DFAPacketizer * 1472 CreateTargetScheduleState(const TargetSubtargetInfo &) const { 1473 return nullptr; 1474 } 1475 1476 /// Sometimes, it is possible for the target 1477 /// to tell, even without aliasing information, that two MIs access different 1478 /// memory addresses. This function returns true if two MIs access different 1479 /// memory addresses and false otherwise. 
  ///
  /// Assumes any physical registers used to compute addresses have the same
  /// value for both instructions. (This is the most useful assumption for
  /// post-RA scheduling.)
  ///
  /// See also MachineInstr::mayAlias, which is implemented on top of this
  /// function.
  virtual bool
  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
                                  AliasAnalysis *AA = nullptr) const {
    assert((MIa.mayLoad() || MIa.mayStore()) &&
           "MIa must load from or modify a memory location");
    assert((MIb.mayLoad() || MIb.mayStore()) &&
           "MIb must load from or modify a memory location");
    return false;
  }

  /// \brief Return the value to use for the MachineCSE's LookAheadLimit,
  /// which is a heuristic used for CSE'ing phys reg defs.
  virtual unsigned getMachineCSELookAheadLimit() const {
    // The default lookahead is small to prevent unprofitable quadratic
    // behavior.
    return 5;
  }

  /// Return an array that contains the ids of the target indices (used for the
  /// TargetIndex machine operand) and their names.
  ///
  /// MIR Serialization is able to serialize only the target indices that are
  /// defined by this method.
  virtual ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const {
    return None;
  }

  /// Decompose the machine operand's target flags into two values - the direct
  /// target flag value and any of bit flags that are applied.
  virtual std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
    return std::make_pair(0u, 0u);
  }

  /// Return an array that contains the direct target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const {
    return None;
  }

  /// Return an array that contains the bitmask target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const {
    return None;
  }

  /// Determines whether \p Inst is a tail call instruction. Override this
  /// method on targets that do not properly set MCID::Return and MCID::Call on
  /// tail call instructions.
  virtual bool isTailCall(const MachineInstr &Inst) const {
    return Inst.isReturn() && Inst.isCall();
  }

  /// True if the instruction is bound to the top of its basic block and no
  /// other instructions shall be inserted before it. This can be implemented
  /// to prevent the register allocator from inserting spills before such
  /// instructions.
  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
    return false;
  }

  /// \brief Return how many instructions would be saved by outlining a
  /// sequence containing \p SequenceSize instructions that appears
  /// \p Occurrences times in a module.
  virtual unsigned getOutliningBenefit(size_t SequenceSize, size_t Occurrences,
                                       bool CanBeTailCall) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningBenefit!");
  }

  /// Represents how an instruction should be mapped by the outliner.
  /// \p Legal instructions are those which are safe to outline.
  /// \p Illegal instructions are those which cannot be outlined.
  /// \p Invisible instructions are instructions which can be outlined, but
  /// shouldn't actually impact the outlining result.
  enum MachineOutlinerInstrType { Legal, Illegal, Invisible };

  /// Returns how or if \p MI should be outlined.
  virtual MachineOutlinerInstrType getOutliningType(MachineInstr &MI) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningType!");
  }

  /// Insert a custom epilogue for outlined functions.
  /// This may be empty, in which case no epilogue or return statement will be
  /// emitted.
  virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      bool IsTailCall) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!");
  }

  /// Insert a call to an outlined function into the program.
  /// Returns an iterator to the spot where we inserted the call. This must be
  /// implemented by the target.
  virtual MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     bool IsTailCall) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
  }

  /// Insert a custom prologue for outlined functions.
  /// This may be empty, in which case no prologue will be emitted.
  virtual void insertOutlinerPrologue(MachineBasicBlock &MBB,
                                      MachineFunction &MF,
                                      bool IsTailCall) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!");
  }

  /// Return true if the function can safely be outlined from.
  /// By default, this means that the function has no red zone.
1610 virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF) const { 1611 llvm_unreachable("Target didn't implement " 1612 "TargetInstrInfo::isFunctionSafeToOutlineFrom!"); 1613 } 1614 1615private: 1616 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode; 1617 unsigned CatchRetOpcode; 1618 unsigned ReturnOpcode; 1619}; 1620 1621/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair. 1622template<> 1623struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> { 1624 typedef DenseMapInfo<unsigned> RegInfo; 1625 1626 static inline TargetInstrInfo::RegSubRegPair getEmptyKey() { 1627 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(), 1628 RegInfo::getEmptyKey()); 1629 } 1630 static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() { 1631 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(), 1632 RegInfo::getTombstoneKey()); 1633 } 1634 /// \brief Reuse getHashValue implementation from 1635 /// std::pair<unsigned, unsigned>. 1636 static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) { 1637 std::pair<unsigned, unsigned> PairVal = 1638 std::make_pair(Val.Reg, Val.SubReg); 1639 return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal); 1640 } 1641 static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, 1642 const TargetInstrInfo::RegSubRegPair &RHS) { 1643 return RegInfo::isEqual(LHS.Reg, RHS.Reg) && 1644 RegInfo::isEqual(LHS.SubReg, RHS.SubReg); 1645 } 1646}; 1647 1648} // end namespace llvm 1649 1650#endif // LLVM_TARGET_TARGETINSTRINFO_H 1651