TargetInstrInfo.h revision e1b53287179b4b9b5c3c549586f688d3fa2ae8ef
1//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file describes the target machine instruction set to the code generator. 11// 12//===----------------------------------------------------------------------===// 13 14#ifndef LLVM_TARGET_TARGETINSTRINFO_H 15#define LLVM_TARGET_TARGETINSTRINFO_H 16 17#include "llvm/ADT/SmallSet.h" 18#include "llvm/MC/MCInstrInfo.h" 19#include "llvm/CodeGen/DFAPacketizer.h" 20#include "llvm/CodeGen/MachineFunction.h" 21 22namespace llvm { 23 24class InstrItineraryData; 25class LiveVariables; 26class MCAsmInfo; 27class MachineMemOperand; 28class MachineRegisterInfo; 29class MDNode; 30class MCInst; 31class MCSchedModel; 32class SDNode; 33class ScheduleHazardRecognizer; 34class SelectionDAG; 35class ScheduleDAG; 36class TargetRegisterClass; 37class TargetRegisterInfo; 38class BranchProbability; 39 40template<class T> class SmallVectorImpl; 41 42 43//--------------------------------------------------------------------------- 44/// 45/// TargetInstrInfo - Interface to description of machine instruction set 46/// 47class TargetInstrInfo : public MCInstrInfo { 48 TargetInstrInfo(const TargetInstrInfo &) LLVM_DELETED_FUNCTION; 49 void operator=(const TargetInstrInfo &) LLVM_DELETED_FUNCTION; 50public: 51 TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1) 52 : CallFrameSetupOpcode(CFSetupOpcode), 53 CallFrameDestroyOpcode(CFDestroyOpcode) { 54 } 55 56 virtual ~TargetInstrInfo(); 57 58 /// getRegClass - Givem a machine instruction descriptor, returns the register 59 /// class constraint for OpNum, or NULL. 
60 const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, 61 unsigned OpNum, 62 const TargetRegisterInfo *TRI, 63 const MachineFunction &MF) const; 64 65 /// isTriviallyReMaterializable - Return true if the instruction is trivially 66 /// rematerializable, meaning it has no side effects and requires no operands 67 /// that aren't always available. 68 bool isTriviallyReMaterializable(const MachineInstr *MI, 69 AliasAnalysis *AA = 0) const { 70 return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF || 71 (MI->getDesc().isRematerializable() && 72 (isReallyTriviallyReMaterializable(MI, AA) || 73 isReallyTriviallyReMaterializableGeneric(MI, AA))); 74 } 75 76protected: 77 /// isReallyTriviallyReMaterializable - For instructions with opcodes for 78 /// which the M_REMATERIALIZABLE flag is set, this hook lets the target 79 /// specify whether the instruction is actually trivially rematerializable, 80 /// taking into consideration its operands. This predicate must return false 81 /// if the instruction has any side effects other than producing a value, or 82 /// if it requres any address registers that are not always available. 83 virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI, 84 AliasAnalysis *AA) const { 85 return false; 86 } 87 88private: 89 /// isReallyTriviallyReMaterializableGeneric - For instructions with opcodes 90 /// for which the M_REMATERIALIZABLE flag is set and the target hook 91 /// isReallyTriviallyReMaterializable returns false, this function does 92 /// target-independent tests to determine if the instruction is really 93 /// trivially rematerializable. 94 bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI, 95 AliasAnalysis *AA) const; 96 97public: 98 /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the 99 /// frame setup/destroy instructions if they exist (-1 otherwise). 
Some 100 /// targets use pseudo instructions in order to abstract away the difference 101 /// between operating with a frame pointer and operating without, through the 102 /// use of these two instructions. 103 /// 104 int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } 105 int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } 106 107 /// isCoalescableExtInstr - Return true if the instruction is a "coalescable" 108 /// extension instruction. That is, it's like a copy where it's legal for the 109 /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns 110 /// true, then it's expected the pre-extension value is available as a subreg 111 /// of the result register. This also returns the sub-register index in 112 /// SubIdx. 113 virtual bool isCoalescableExtInstr(const MachineInstr &MI, 114 unsigned &SrcReg, unsigned &DstReg, 115 unsigned &SubIdx) const { 116 return false; 117 } 118 119 /// isLoadFromStackSlot - If the specified machine instruction is a direct 120 /// load from a stack slot, return the virtual or physical register number of 121 /// the destination along with the FrameIndex of the loaded stack slot. If 122 /// not, return 0. This predicate must return 0 if the instruction has 123 /// any side effects other than loading from the stack slot. 124 virtual unsigned isLoadFromStackSlot(const MachineInstr *MI, 125 int &FrameIndex) const { 126 return 0; 127 } 128 129 /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination 130 /// stack locations as well. This uses a heuristic so it isn't 131 /// reliable for correctness. 132 virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI, 133 int &FrameIndex) const { 134 return 0; 135 } 136 137 /// hasLoadFromStackSlot - If the specified machine instruction has 138 /// a load from a stack slot, return true along with the FrameIndex 139 /// of the loaded stack slot and the machine mem operand containing 140 /// the reference. 
If not, return false. Unlike 141 /// isLoadFromStackSlot, this returns true for any instructions that 142 /// loads from the stack. This is just a hint, as some cases may be 143 /// missed. 144 virtual bool hasLoadFromStackSlot(const MachineInstr *MI, 145 const MachineMemOperand *&MMO, 146 int &FrameIndex) const { 147 return 0; 148 } 149 150 /// isStoreToStackSlot - If the specified machine instruction is a direct 151 /// store to a stack slot, return the virtual or physical register number of 152 /// the source reg along with the FrameIndex of the loaded stack slot. If 153 /// not, return 0. This predicate must return 0 if the instruction has 154 /// any side effects other than storing to the stack slot. 155 virtual unsigned isStoreToStackSlot(const MachineInstr *MI, 156 int &FrameIndex) const { 157 return 0; 158 } 159 160 /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination 161 /// stack locations as well. This uses a heuristic so it isn't 162 /// reliable for correctness. 163 virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI, 164 int &FrameIndex) const { 165 return 0; 166 } 167 168 /// hasStoreToStackSlot - If the specified machine instruction has a 169 /// store to a stack slot, return true along with the FrameIndex of 170 /// the loaded stack slot and the machine mem operand containing the 171 /// reference. If not, return false. Unlike isStoreToStackSlot, 172 /// this returns true for any instructions that stores to the 173 /// stack. This is just a hint, as some cases may be missed. 174 virtual bool hasStoreToStackSlot(const MachineInstr *MI, 175 const MachineMemOperand *&MMO, 176 int &FrameIndex) const { 177 return 0; 178 } 179 180 /// reMaterialize - Re-issue the specified 'original' instruction at the 181 /// specific location targeting a new destination register. 182 /// The register in Orig->getOperand(0).getReg() will be substituted by 183 /// DestReg:SubIdx. 
Any existing subreg index is preserved or composed with 184 /// SubIdx. 185 virtual void reMaterialize(MachineBasicBlock &MBB, 186 MachineBasicBlock::iterator MI, 187 unsigned DestReg, unsigned SubIdx, 188 const MachineInstr *Orig, 189 const TargetRegisterInfo &TRI) const = 0; 190 191 /// duplicate - Create a duplicate of the Orig instruction in MF. This is like 192 /// MachineFunction::CloneMachineInstr(), but the target may update operands 193 /// that are required to be unique. 194 /// 195 /// The instruction must be duplicable as indicated by isNotDuplicable(). 196 virtual MachineInstr *duplicate(MachineInstr *Orig, 197 MachineFunction &MF) const = 0; 198 199 /// convertToThreeAddress - This method must be implemented by targets that 200 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target 201 /// may be able to convert a two-address instruction into one or more true 202 /// three-address instructions on demand. This allows the X86 target (for 203 /// example) to convert ADD and SHL instructions into LEA instructions if they 204 /// would require register copies due to two-addressness. 205 /// 206 /// This method returns a null pointer if the transformation cannot be 207 /// performed, otherwise it returns the last new instruction. 208 /// 209 virtual MachineInstr * 210 convertToThreeAddress(MachineFunction::iterator &MFI, 211 MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const { 212 return 0; 213 } 214 215 /// commuteInstruction - If a target has any instructions that are 216 /// commutable but require converting to different instructions or making 217 /// non-trivial changes to commute them, this method can overloaded to do 218 /// that. The default implementation simply swaps the commutable operands. 219 /// If NewMI is false, MI is modified in place and returned; otherwise, a 220 /// new machine instruction is created and returned. 
Do not call this 221 /// method for a non-commutable instruction, but there may be some cases 222 /// where this method fails and returns null. 223 virtual MachineInstr *commuteInstruction(MachineInstr *MI, 224 bool NewMI = false) const = 0; 225 226 /// findCommutedOpIndices - If specified MI is commutable, return the two 227 /// operand indices that would swap value. Return false if the instruction 228 /// is not in a form which this routine understands. 229 virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1, 230 unsigned &SrcOpIdx2) const = 0; 231 232 /// produceSameValue - Return true if two machine instructions would produce 233 /// identical values. By default, this is only true when the two instructions 234 /// are deemed identical except for defs. If this function is called when the 235 /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for 236 /// aggressive checks. 237 virtual bool produceSameValue(const MachineInstr *MI0, 238 const MachineInstr *MI1, 239 const MachineRegisterInfo *MRI = 0) const = 0; 240 241 /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning 242 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't 243 /// implemented for a target). Upon success, this returns false and returns 244 /// with the following information in various cases: 245 /// 246 /// 1. If this block ends with no branches (it just falls through to its succ) 247 /// just return false, leaving TBB/FBB null. 248 /// 2. If this block ends with only an unconditional branch, it sets TBB to be 249 /// the destination block. 250 /// 3. If this block ends with a conditional branch and it falls through to a 251 /// successor block, it sets TBB to be the branch destination block and a 252 /// list of operands that evaluate the condition. These operands can be 253 /// passed to other TargetInstrInfo methods to create new branches. 254 /// 4. 
If this block ends with a conditional branch followed by an 255 /// unconditional branch, it returns the 'true' destination in TBB, the 256 /// 'false' destination in FBB, and a list of operands that evaluate the 257 /// condition. These operands can be passed to other TargetInstrInfo 258 /// methods to create new branches. 259 /// 260 /// Note that RemoveBranch and InsertBranch must be implemented to support 261 /// cases where this method returns success. 262 /// 263 /// If AllowModify is true, then this routine is allowed to modify the basic 264 /// block (e.g. delete instructions after the unconditional branch). 265 /// 266 virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 267 MachineBasicBlock *&FBB, 268 SmallVectorImpl<MachineOperand> &Cond, 269 bool AllowModify = false) const { 270 return true; 271 } 272 273 /// RemoveBranch - Remove the branching code at the end of the specific MBB. 274 /// This is only invoked in cases where AnalyzeBranch returns success. It 275 /// returns the number of instructions that were removed. 276 virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const { 277 llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!"); 278 } 279 280 /// InsertBranch - Insert branch code into the end of the specified 281 /// MachineBasicBlock. The operands to this method are the same as those 282 /// returned by AnalyzeBranch. This is only invoked in cases where 283 /// AnalyzeBranch returns success. It returns the number of instructions 284 /// inserted. 285 /// 286 /// It is also invoked by tail merging to add unconditional branches in 287 /// cases where AnalyzeBranch doesn't apply because there was no original 288 /// branch to analyze. At least this much must be implemented, else tail 289 /// merging needs to be disabled. 
290 virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 291 MachineBasicBlock *FBB, 292 const SmallVectorImpl<MachineOperand> &Cond, 293 DebugLoc DL) const { 294 llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!"); 295 } 296 297 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything 298 /// after it, replacing it with an unconditional branch to NewDest. This is 299 /// used by the tail merging pass. 300 virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, 301 MachineBasicBlock *NewDest) const = 0; 302 303 /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic 304 /// block at the specified instruction (i.e. instruction would be the start 305 /// of a new basic block). 306 virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, 307 MachineBasicBlock::iterator MBBI) const { 308 return true; 309 } 310 311 /// isProfitableToIfCvt - Return true if it's profitable to predicate 312 /// instructions with accumulated instruction latency of "NumCycles" 313 /// of the specified basic block, where the probability of the instructions 314 /// being executed is given by Probability, and Confidence is a measure 315 /// of our confidence that it will be properly predicted. 316 virtual 317 bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, 318 unsigned ExtraPredCycles, 319 const BranchProbability &Probability) const { 320 return false; 321 } 322 323 /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one 324 /// checks for the case where two basic blocks from true and false path 325 /// of a if-then-else (diamond) are predicated on mutally exclusive 326 /// predicates, where the probability of the true path being taken is given 327 /// by Probability, and Confidence is a measure of our confidence that it 328 /// will be properly predicted. 
329 virtual bool 330 isProfitableToIfCvt(MachineBasicBlock &TMBB, 331 unsigned NumTCycles, unsigned ExtraTCycles, 332 MachineBasicBlock &FMBB, 333 unsigned NumFCycles, unsigned ExtraFCycles, 334 const BranchProbability &Probability) const { 335 return false; 336 } 337 338 /// isProfitableToDupForIfCvt - Return true if it's profitable for 339 /// if-converter to duplicate instructions of specified accumulated 340 /// instruction latencies in the specified MBB to enable if-conversion. 341 /// The probability of the instructions being executed is given by 342 /// Probability, and Confidence is a measure of our confidence that it 343 /// will be properly predicted. 344 virtual bool 345 isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, 346 const BranchProbability &Probability) const { 347 return false; 348 } 349 350 /// isProfitableToUnpredicate - Return true if it's profitable to unpredicate 351 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually 352 /// exclusive predicates. 353 /// e.g. 354 /// subeq r0, r1, #1 355 /// addne r0, r1, #1 356 /// => 357 /// sub r0, r1, #1 358 /// addne r0, r1, #1 359 /// 360 /// This may be profitable is conditional instructions are always executed. 361 virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, 362 MachineBasicBlock &FMBB) const { 363 return false; 364 } 365 366 /// canInsertSelect - Return true if it is possible to insert a select 367 /// instruction that chooses between TrueReg and FalseReg based on the 368 /// condition code in Cond. 369 /// 370 /// When successful, also return the latency in cycles from TrueReg, 371 /// FalseReg, and Cond to the destination register. The Cond latency should 372 /// compensate for a conditional branch being removed. 
For example, if a 373 /// conditional branch has a 3 cycle latency from the condition code read, 374 /// and a cmov instruction has a 2 cycle latency from the condition code 375 /// read, CondCycles should be returned as -1. 376 /// 377 /// @param MBB Block where select instruction would be inserted. 378 /// @param Cond Condition returned by AnalyzeBranch. 379 /// @param TrueReg Virtual register to select when Cond is true. 380 /// @param FalseReg Virtual register to select when Cond is false. 381 /// @param CondCycles Latency from Cond+Branch to select output. 382 /// @param TrueCycles Latency from TrueReg to select output. 383 /// @param FalseCycles Latency from FalseReg to select output. 384 virtual bool canInsertSelect(const MachineBasicBlock &MBB, 385 const SmallVectorImpl<MachineOperand> &Cond, 386 unsigned TrueReg, unsigned FalseReg, 387 int &CondCycles, 388 int &TrueCycles, int &FalseCycles) const { 389 return false; 390 } 391 392 /// insertSelect - Insert a select instruction into MBB before I that will 393 /// copy TrueReg to DstReg when Cond is true, and FalseReg to DstReg when 394 /// Cond is false. 395 /// 396 /// This function can only be called after canInsertSelect() returned true. 397 /// The condition in Cond comes from AnalyzeBranch, and it can be assumed 398 /// that the same flags or registers required by Cond are available at the 399 /// insertion point. 400 /// 401 /// @param MBB Block where select instruction should be inserted. 402 /// @param I Insertion point. 403 /// @param DL Source location for debugging. 404 /// @param DstReg Virtual register to be defined by select instruction. 405 /// @param Cond Condition as computed by AnalyzeBranch. 406 /// @param TrueReg Virtual register to copy when Cond is true. 407 /// @param FalseReg Virtual register to copy when Cons is false. 
408 virtual void insertSelect(MachineBasicBlock &MBB, 409 MachineBasicBlock::iterator I, DebugLoc DL, 410 unsigned DstReg, 411 const SmallVectorImpl<MachineOperand> &Cond, 412 unsigned TrueReg, unsigned FalseReg) const { 413 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!"); 414 } 415 416 /// analyzeSelect - Analyze the given select instruction, returning true if 417 /// it cannot be understood. It is assumed that MI->isSelect() is true. 418 /// 419 /// When successful, return the controlling condition and the operands that 420 /// determine the true and false result values. 421 /// 422 /// Result = SELECT Cond, TrueOp, FalseOp 423 /// 424 /// Some targets can optimize select instructions, for example by predicating 425 /// the instruction defining one of the operands. Such targets should set 426 /// Optimizable. 427 /// 428 /// @param MI Select instruction to analyze. 429 /// @param Cond Condition controlling the select. 430 /// @param TrueOp Operand number of the value selected when Cond is true. 431 /// @param FalseOp Operand number of the value selected when Cond is false. 432 /// @param Optimizable Returned as true if MI is optimizable. 433 /// @returns False on success. 434 virtual bool analyzeSelect(const MachineInstr *MI, 435 SmallVectorImpl<MachineOperand> &Cond, 436 unsigned &TrueOp, unsigned &FalseOp, 437 bool &Optimizable) const { 438 assert(MI && MI->isSelect() && "MI must be a select instruction"); 439 return true; 440 } 441 442 /// optimizeSelect - Given a select instruction that was understood by 443 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by 444 /// merging it with one of its operands. Returns NULL on failure. 445 /// 446 /// When successful, returns the new select instruction. The client is 447 /// responsible for deleting MI. 448 /// 449 /// If both sides of the select can be optimized, PreferFalse is used to pick 450 /// a side. 451 /// 452 /// @param MI Optimizable select instruction. 
453 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp. 454 /// @returns Optimized instruction or NULL. 455 virtual MachineInstr *optimizeSelect(MachineInstr *MI, 456 bool PreferFalse = false) const { 457 // This function must be implemented if Optimizable is ever set. 458 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!"); 459 } 460 461 /// copyPhysReg - Emit instructions to copy a pair of physical registers. 462 /// 463 /// This function should support copies within any legal register class as 464 /// well as any cross-class copies created during instruction selection. 465 /// 466 /// The source and destination registers may overlap, which may require a 467 /// careful implementation when multiple copy instructions are required for 468 /// large registers. See for example the ARM target. 469 virtual void copyPhysReg(MachineBasicBlock &MBB, 470 MachineBasicBlock::iterator MI, DebugLoc DL, 471 unsigned DestReg, unsigned SrcReg, 472 bool KillSrc) const { 473 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); 474 } 475 476 /// storeRegToStackSlot - Store the specified register of the given register 477 /// class to the specified stack frame index. The store instruction is to be 478 /// added to the given machine basic block before the specified machine 479 /// instruction. If isKill is true, the register operand is the last use and 480 /// must be marked kill. 481 virtual void storeRegToStackSlot(MachineBasicBlock &MBB, 482 MachineBasicBlock::iterator MI, 483 unsigned SrcReg, bool isKill, int FrameIndex, 484 const TargetRegisterClass *RC, 485 const TargetRegisterInfo *TRI) const { 486 llvm_unreachable("Target didn't implement " 487 "TargetInstrInfo::storeRegToStackSlot!"); 488 } 489 490 /// loadRegFromStackSlot - Load the specified register of the given register 491 /// class from the specified stack frame index. 
The load instruction is to be 492 /// added to the given machine basic block before the specified machine 493 /// instruction. 494 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, 495 MachineBasicBlock::iterator MI, 496 unsigned DestReg, int FrameIndex, 497 const TargetRegisterClass *RC, 498 const TargetRegisterInfo *TRI) const { 499 llvm_unreachable("Target didn't implement " 500 "TargetInstrInfo::loadRegFromStackSlot!"); 501 } 502 503 /// expandPostRAPseudo - This function is called for all pseudo instructions 504 /// that remain after register allocation. Many pseudo instructions are 505 /// created to help register allocation. This is the place to convert them 506 /// into real instructions. The target can edit MI in place, or it can insert 507 /// new instructions and erase MI. The function should return true if 508 /// anything was changed. 509 virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const { 510 return false; 511 } 512 513 /// emitFrameIndexDebugValue - Emit a target-dependent form of 514 /// DBG_VALUE encoding the address of a frame index. Addresses would 515 /// normally be lowered the same way as other addresses on the target, 516 /// e.g. in load instructions. For targets that do not support this 517 /// the debug info is simply lost. 518 /// If you add this for a target you should handle this DBG_VALUE in the 519 /// target-specific AsmPrinter code as well; you will probably get invalid 520 /// assembly output if you don't. 521 virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, 522 int FrameIx, 523 uint64_t Offset, 524 const MDNode *MDPtr, 525 DebugLoc dl) const { 526 return 0; 527 } 528 529 /// foldMemoryOperand - Attempt to fold a load or store of the specified stack 530 /// slot into the specified machine instruction for the specified operand(s). 531 /// If this is possible, a new instruction is returned with the specified 532 /// operand folded, otherwise NULL is returned. 
533 /// The new instruction is inserted before MI, and the client is responsible 534 /// for removing the old instruction. 535 MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI, 536 const SmallVectorImpl<unsigned> &Ops, 537 int FrameIndex) const; 538 539 /// foldMemoryOperand - Same as the previous version except it allows folding 540 /// of any load and store from / to any address, not just from a specific 541 /// stack slot. 542 MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI, 543 const SmallVectorImpl<unsigned> &Ops, 544 MachineInstr* LoadMI) const; 545 546protected: 547 /// foldMemoryOperandImpl - Target-dependent implementation for 548 /// foldMemoryOperand. Target-independent code in foldMemoryOperand will 549 /// take care of adding a MachineMemOperand to the newly created instruction. 550 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, 551 MachineInstr* MI, 552 const SmallVectorImpl<unsigned> &Ops, 553 int FrameIndex) const { 554 return 0; 555 } 556 557 /// foldMemoryOperandImpl - Target-dependent implementation for 558 /// foldMemoryOperand. Target-independent code in foldMemoryOperand will 559 /// take care of adding a MachineMemOperand to the newly created instruction. 560 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF, 561 MachineInstr* MI, 562 const SmallVectorImpl<unsigned> &Ops, 563 MachineInstr* LoadMI) const { 564 return 0; 565 } 566 567public: 568 /// canFoldMemoryOperand - Returns true for the specified load / store if 569 /// folding is possible. 570 virtual 571 bool canFoldMemoryOperand(const MachineInstr *MI, 572 const SmallVectorImpl<unsigned> &Ops) const =0; 573 574 /// unfoldMemoryOperand - Separate a single instruction which folded a load or 575 /// a store or a load and a store into two or more instruction. If this is 576 /// possible, returns true as well as the new instructions by reference. 
577 virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, 578 unsigned Reg, bool UnfoldLoad, bool UnfoldStore, 579 SmallVectorImpl<MachineInstr*> &NewMIs) const{ 580 return false; 581 } 582 583 virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 584 SmallVectorImpl<SDNode*> &NewNodes) const { 585 return false; 586 } 587 588 /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new 589 /// instruction after load / store are unfolded from an instruction of the 590 /// specified opcode. It returns zero if the specified unfolding is not 591 /// possible. If LoadRegIndex is non-null, it is filled in with the operand 592 /// index of the operand which will hold the register holding the loaded 593 /// value. 594 virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, 595 bool UnfoldLoad, bool UnfoldStore, 596 unsigned *LoadRegIndex = 0) const { 597 return 0; 598 } 599 600 /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler 601 /// to determine if two loads are loading from the same base address. It 602 /// should only return true if the base pointers are the same and the 603 /// only differences between the two addresses are the offset. It also returns 604 /// the offsets by reference. 605 virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 606 int64_t &Offset1, int64_t &Offset2) const { 607 return false; 608 } 609 610 /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to 611 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should 612 /// be scheduled togther. On some targets if two loads are loading from 613 /// addresses in the same cache line, it's better if they are scheduled 614 /// together. This function takes two integers that represent the load offsets 615 /// from the common base address. It returns true if it decides it's desirable 616 /// to schedule the two loads together. 
"NumLoads" is the number of loads that 617 /// have already been scheduled after Load1. 618 virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 619 int64_t Offset1, int64_t Offset2, 620 unsigned NumLoads) const { 621 return false; 622 } 623 624 /// ReverseBranchCondition - Reverses the branch condition of the specified 625 /// condition list, returning false on success and true if it cannot be 626 /// reversed. 627 virtual 628 bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 629 return true; 630 } 631 632 /// insertNoop - Insert a noop into the instruction stream at the specified 633 /// point. 634 virtual void insertNoop(MachineBasicBlock &MBB, 635 MachineBasicBlock::iterator MI) const; 636 637 638 /// getNoopForMachoTarget - Return the noop instruction to use for a noop. 639 virtual void getNoopForMachoTarget(MCInst &NopInst) const { 640 // Default to just using 'nop' string. 641 } 642 643 644 /// isPredicated - Returns true if the instruction is already predicated. 645 /// 646 virtual bool isPredicated(const MachineInstr *MI) const { 647 return false; 648 } 649 650 /// isUnpredicatedTerminator - Returns true if the instruction is a 651 /// terminator instruction that has not been predicated. 652 virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const = 0; 653 654 /// PredicateInstruction - Convert the instruction into a predicated 655 /// instruction. It returns true if the operation was successful. 656 virtual 657 bool PredicateInstruction(MachineInstr *MI, 658 const SmallVectorImpl<MachineOperand> &Pred) const = 0; 659 660 /// SubsumesPredicate - Returns true if the first specified predicate 661 /// subsumes the second, e.g. GE subsumes GT. 
662 virtual 663 bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1, 664 const SmallVectorImpl<MachineOperand> &Pred2) const { 665 return false; 666 } 667 668 /// DefinesPredicate - If the specified instruction defines any predicate 669 /// or condition code register(s) used for predication, returns true as well 670 /// as the definition predicate(s) by reference. 671 virtual bool DefinesPredicate(MachineInstr *MI, 672 std::vector<MachineOperand> &Pred) const { 673 return false; 674 } 675 676 /// isPredicable - Return true if the specified instruction can be predicated. 677 /// By default, this returns true for every instruction with a 678 /// PredicateOperand. 679 virtual bool isPredicable(MachineInstr *MI) const { 680 return MI->getDesc().isPredicable(); 681 } 682 683 /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine 684 /// instruction that defines the specified register class. 685 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { 686 return true; 687 } 688 689 /// isSchedulingBoundary - Test if the given instruction should be 690 /// considered a scheduling boundary. This primarily includes labels and 691 /// terminators. 692 virtual bool isSchedulingBoundary(const MachineInstr *MI, 693 const MachineBasicBlock *MBB, 694 const MachineFunction &MF) const = 0; 695 696 /// Measure the specified inline asm to determine an approximation of its 697 /// length. 698 virtual unsigned getInlineAsmLength(const char *Str, 699 const MCAsmInfo &MAI) const; 700 701 /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer to 702 /// use for this target when scheduling the machine instructions before 703 /// register allocation. 
  virtual ScheduleHazardRecognizer*
  CreateTargetHazardRecognizer(const TargetMachine *TM,
                               const ScheduleDAG *DAG) const = 0;

  /// CreateTargetMIHazardRecognizer - Allocate and return a hazard recognizer
  /// to use for this target when scheduling the machine instructions before
  /// register allocation.
  virtual ScheduleHazardRecognizer*
  CreateTargetMIHazardRecognizer(const InstrItineraryData*,
                                 const ScheduleDAG *DAG) const = 0;

  /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
  /// recognizer to use for this target when scheduling the machine instructions
  /// after register allocation.
  virtual ScheduleHazardRecognizer*
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
                                     const ScheduleDAG *DAG) const = 0;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if having two register operands, and the value it
  /// compares against (through the Mask and Value out-parameters). Return true
  /// if the comparison instruction can be analyzed. The default implementation
  /// cannot analyze any instruction.
  virtual bool analyzeCompare(const MachineInstr *MI,
                              unsigned &SrcReg, unsigned &SrcReg2,
                              int &Mask, int &Value) const {
    return false;
  }

  /// optimizeCompareInstr - See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
                                    unsigned SrcReg, unsigned SrcReg2,
                                    int Mask, int Value,
                                    const MachineRegisterInfo *MRI) const {
    return false;
  }

  /// optimizeLoadInstr - Try to remove the load by folding it to a register
  /// operand at the use. We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI.
  /// FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding. The default
  /// implementation performs no folding and returns null.
  virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
                                          const MachineRegisterInfo *MRI,
                                          unsigned &FoldAsLoadDefReg,
                                          MachineInstr *&DefMI) const {
    return 0;
  }

  /// FoldImmediate - 'Reg' is known to be defined by a move immediate
  /// instruction, try to fold the immediate into the use instruction.
  /// Returns true on success; the default never folds.
  virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                             unsigned Reg, MachineRegisterInfo *MRI) const {
    return false;
  }

  /// getNumMicroOps - Return the number of u-operations the given machine
  /// instruction will be decoded to on the target cpu. The itinerary's
  /// IssueWidth is the number of microops that can be dispatched each
  /// cycle. An instruction with zero microops takes no dispatch resources.
  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr *MI) const = 0;

  /// isZeroCost - Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  // Relies on all zero-cost pseudo opcodes being numbered at or below COPY.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }

  // SelectionDAG variant of getOperandLatency; see the MachineInstr overload
  // below for the semantics.
  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                SDNode *DefNode, unsigned DefIdx,
                                SDNode *UseNode, unsigned UseIdx) const = 0;

  /// getOperandLatency - Compute and return the use operand latency of a given
  /// pair of def and use.
  /// In most cases, the static scheduling itinerary was enough to determine the
  /// operand latency.
  /// But it may not be possible for instructions with variable
  /// number of defs / uses.
  ///
  /// This is a raw interface to the itinerary that may be directly overridden
  /// by a target. Use computeOperandLatency to get the best estimate of
  /// latency.
  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *UseMI,
                                unsigned UseIdx) const = 0;

  /// computeOperandLatency - Compute and return the latency of the given data
  /// dependent def and use when the operand indices are already known.
  ///
  /// FindMin may be set to get the minimum vs. expected latency.
  unsigned computeOperandLatency(const InstrItineraryData *ItinData,
                                 const MachineInstr *DefMI, unsigned DefIdx,
                                 const MachineInstr *UseMI, unsigned UseIdx,
                                 bool FindMin = false) const;

  /// getOutputLatency - Compute and return the output dependency latency of a
  /// given pair of defs which both target the same register. This is usually
  /// one.
  virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *DepMI) const {
    return 1;
  }

  /// getInstrLatency - Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr *MI,
                                   unsigned *PredCost = 0) const = 0;

  // SelectionDAG variant of getInstrLatency.
  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              SDNode *Node) const = 0;

  /// Return the default expected latency for a def based on its opcode.
  unsigned defaultDefLatency(const MCSchedModel *SchedModel,
                             const MachineInstr *DefMI) const;

  /// isHighLatencyDef - Return true if this opcode has high latency to its
  /// result.
  virtual bool isHighLatencyDef(int opc) const { return false; }

  /// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
  /// and a use in the current loop; return true if the target considered
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in a
  /// high register pressure situation.
  virtual
  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI, unsigned UseIdx) const {
    return false;
  }

  /// hasLowDefLatency - Compute operand latency of a def of 'Reg', return true
  /// if the target considered it 'low'.
  virtual
  bool hasLowDefLatency(const InstrItineraryData *ItinData,
                        const MachineInstr *DefMI, unsigned DefIdx) const = 0;

  /// verifyInstruction - Perform target specific instruction verification.
  /// The default implementation accepts every instruction.
  virtual
  bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
    return true;
  }

  /// getExecutionDomain - Return the current execution domain and bit mask of
  /// possible domains for instruction.
  ///
  /// Some micro-architectures have multiple execution domains, and multiple
  /// opcodes that perform the same operation in different domains. For
  /// example, the x86 architecture provides the por, orps, and orpd
  /// instructions that all do the same thing. There is a latency penalty if a
  /// register is written in one domain and read in another.
  ///
  /// This function returns a pair (domain, mask) containing the execution
  /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
  /// function can be used to change the opcode to one of the domains in the
  /// bit mask. Instructions whose execution domain can't be changed should
  /// return a 0 mask.
  ///
  /// The execution domain numbers don't have any special meaning except domain
  /// 0 is used for instructions that are not associated with any interesting
  /// execution domain.
  ///
  virtual std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr *MI) const {
    // Default: no interesting domain, and an empty mask (domain not
    // changeable).
    return std::make_pair(0, 0);
  }

  /// setExecutionDomain - Change the opcode of MI to execute in Domain.
  ///
  /// The bit (1 << Domain) must be set in the mask returned from
  /// getExecutionDomain(MI).
  ///
  virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}


  /// getPartialRegUpdateClearance - Returns the preferred minimum clearance
  /// before an instruction with an unwanted partial register update.
  ///
  /// Some instructions only write part of a register, and implicitly need to
  /// read the other parts of the register. This may cause unwanted stalls
  /// preventing otherwise unrelated instructions from executing in parallel in
  /// an out-of-order CPU.
  ///
  /// For example, the x86 instruction cvtsi2ss writes its result to bits
  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
  /// the instruction needs to wait for the old value of the register to become
  /// available:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
  /// instruction before it can issue, even though the high bits of %xmm0
  /// probably aren't needed.
  ///
  /// This hook returns the preferred clearance before MI, measured in
  /// instructions. Other defs of MI's operand OpNum are avoided in the last N
  /// instructions before MI. It should only return a positive value for
  /// unwanted dependencies.
  /// If the old bits of the defined register have
  /// useful values, or if MI is determined to otherwise read the dependency,
  /// the hook should return 0.
  ///
  /// The unwanted dependency may be handled by:
  ///
  /// 1. Allocating the same register for an MI def and use. That makes the
  /// unwanted dependency identical to a required dependency.
  ///
  /// 2. Allocating a register for the def that has no defs in the previous N
  /// instructions.
  ///
  /// 3. Calling breakPartialRegDependency() with the same arguments. This
  /// allows the target to insert a dependency breaking instruction.
  ///
  virtual unsigned
  getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no partial register dependency.
    return 0;
  }

  /// breakPartialRegDependency - Insert a dependency-breaking instruction
  /// before MI to eliminate an unwanted dependency on OpNum.
  ///
  /// If it wasn't possible to avoid a def in the last N instructions before MI
  /// (see getPartialRegUpdateClearance), this hook will be called to break the
  /// unwanted dependency.
  ///
  /// On x86, an xorps instruction can be used as a dependency breaker:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   xorps %xmm0, %xmm0
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// An <imp-kill> operand should be added to MI if an instruction was
  /// inserted. This ties the instructions together in the post-ra scheduler.
  /// The default implementation inserts nothing.
  virtual void
  breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                            const TargetRegisterInfo *TRI) const {}

  /// Create machine specific model for scheduling.
  /// The default implementation provides no target scheduling state
  /// (DFA packetizer).
  virtual DFAPacketizer*
  CreateTargetScheduleState(const TargetMachine*, const ScheduleDAG*) const {
    return NULL;
  }

private:
  // Opcodes of the target's call-frame setup/destroy pseudo instructions,
  // or -1 when the target has none (see the constructor defaults).
  int CallFrameSetupOpcode, CallFrameDestroyOpcode;
};

/// TargetInstrInfoImpl - This is the default implementation of
/// TargetInstrInfo, which just provides a couple of default implementations
/// for various methods. This is separated out because it is implemented in
/// libcodegen, not in libtarget.
class TargetInstrInfoImpl : public TargetInstrInfo {
protected:
  // Forwards the call-frame pseudo opcodes to the TargetInstrInfo base.
  TargetInstrInfoImpl(int CallFrameSetupOpcode = -1,
                      int CallFrameDestroyOpcode = -1)
    : TargetInstrInfo(CallFrameSetupOpcode, CallFrameDestroyOpcode) {}
public:
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
                                       MachineBasicBlock *NewDest) const;
  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                           bool NewMI = false) const;
  virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;
  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops) const;
  virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
                                    const MachineMemOperand *&MMO,
                                    int &FrameIndex) const;
  virtual bool hasStoreToStackSlot(const MachineInstr *MI,
                                   const MachineMemOperand *&MMO,
                                   int &FrameIndex) const;
  virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
  virtual bool PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const;
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubReg,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const;
  virtual MachineInstr *duplicate(MachineInstr *Orig,
                                  MachineFunction &MF) const;
  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI) const;
  virtual bool isSchedulingBoundary(const MachineInstr *MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const;

  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                SDNode *DefNode, unsigned DefIdx,
                                SDNode *UseNode, unsigned UseIdx) const;

  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              SDNode *Node) const;

  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr *MI) const;

  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr *MI,
                                   unsigned *PredCost = 0) const;

  virtual
  bool hasLowDefLatency(const InstrItineraryData *ItinData,
                        const MachineInstr *DefMI, unsigned DefIdx) const;

  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *UseMI,
                                unsigned UseIdx) const;

  bool usePreRAHazardRecognizer() const;

  virtual ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetMachine*, const ScheduleDAG*) const;

  virtual ScheduleHazardRecognizer *
  CreateTargetMIHazardRecognizer(const InstrItineraryData*,
                                 const ScheduleDAG*) const;

  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
                                     const ScheduleDAG*) const;
};

} // End llvm namespace

#endif