TargetInstrInfo.h revision 7a2bdde0a0eebcd2125055e0eacaca040f0b766c
//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H

#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/CodeGen/MachineFunction.h"

namespace llvm {

class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MachineRegisterInfo;
class MDNode;
class MCInst;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;

template<class T> class SmallVectorImpl;


//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo {
  const TargetInstrDesc *Descriptors; // Raw array to allow static init'n
  unsigned NumOpcodes;                // Number of entries in the desc array

  TargetInstrInfo(const TargetInstrInfo &);  // DO NOT IMPLEMENT
  void operator=(const TargetInstrInfo &);   // DO NOT IMPLEMENT
public:
  TargetInstrInfo(const TargetInstrDesc *desc, unsigned NumOpcodes);
  virtual ~TargetInstrInfo();

  unsigned getNumOpcodes() const { return NumOpcodes; }

  /// get - Return the machine instruction descriptor that corresponds to the
  /// specified instruction opcode.
  ///
  const TargetInstrDesc &get(unsigned Opcode) const {
    assert(Opcode < NumOpcodes && "Invalid opcode!");
    return Descriptors[Opcode];
  }

  /// isTriviallyReMaterializable - Return true if the instruction is trivially
  /// rematerializable, meaning it has no side effects and requires no operands
  /// that aren't always available.
  bool isTriviallyReMaterializable(const MachineInstr *MI,
                                   AliasAnalysis *AA = 0) const {
    return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
           (MI->getDesc().isRematerializable() &&
            (isReallyTriviallyReMaterializable(MI, AA) ||
             isReallyTriviallyReMaterializableGeneric(MI, AA)));
  }

protected:
  /// isReallyTriviallyReMaterializable - For instructions with opcodes for
  /// which the M_REMATERIALIZABLE flag is set, this hook lets the target
  /// specify whether the instruction is actually trivially rematerializable,
  /// taking into consideration its operands. This predicate must return false
  /// if the instruction has any side effects other than producing a value, or
  /// if it requires any address registers that are not always available.
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                 AliasAnalysis *AA) const {
    return false;
  }

private:
  /// isReallyTriviallyReMaterializableGeneric - For instructions with opcodes
  /// for which the M_REMATERIALIZABLE flag is set and the target hook
  /// isReallyTriviallyReMaterializable returns false, this function does
  /// target-independent tests to determine if the instruction is really
  /// trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                                AliasAnalysis *AA) const;

public:
  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination, e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
                                     unsigned &SrcReg, unsigned &DstReg,
                                     unsigned &SubIdx) const {
    return false;
  }

  /// isLoadFromStackSlot - If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
                                       int &FrameIndex) const {
    return 0;
  }
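
  // Illustrative sketch, not part of the interface: a client pass that wants
  // to recognize simple reloads might query this hook as follows (TII and MI
  // are assumed to be in scope):
  //
  //   int FI;
  //   if (unsigned Reg = TII->isLoadFromStackSlot(MI, FI)) {
  //     // MI is a plain reload of register Reg from frame index FI.
  //   }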

  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well.  This uses a heuristic so it isn't
  /// reliable for correctness.
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// hasLoadFromStackSlot - If the specified machine instruction has
  /// a load from a stack slot, return true along with the FrameIndex
  /// of the loaded stack slot and the machine mem operand containing
  /// the reference.  If not, return false.  Unlike
  /// isLoadFromStackSlot, this returns true for any instruction that
  /// loads from the stack.  This is just a hint, as some cases may be
  /// missed.
  virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
                                    const MachineMemOperand *&MMO,
                                    int &FrameIndex) const {
    return false;
  }

  /// isStoreToStackSlot - If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stored stack slot.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well.  This uses a heuristic so it isn't
  /// reliable for correctness.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// hasStoreToStackSlot - If the specified machine instruction has a
  /// store to a stack slot, return true along with the FrameIndex of
  /// the stored stack slot and the machine mem operand containing the
  /// reference.  If not, return false.  Unlike isStoreToStackSlot,
  /// this returns true for any instruction that stores to the
  /// stack.  This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(const MachineInstr *MI,
                                   const MachineMemOperand *&MMO,
                                   int &FrameIndex) const {
    return false;
  }

  /// reMaterialize - Re-issue the specified 'original' instruction at the
  /// specified location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubIdx,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const = 0;
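
  // Illustrative sketch, not part of the interface: a coalescer or spiller
  // that prefers rematerializing a value over copying it typically pairs this
  // hook with isTriviallyReMaterializable (DefMI, InsertPt, DestReg, SubIdx,
  // AA and TRI are assumed to be in scope):
  //
  //   if (TII->isTriviallyReMaterializable(DefMI, AA))
  //     TII->reMaterialize(MBB, InsertPt, DestReg, SubIdx, DefMI, *TRI);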

  /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
  /// two-address instruction inserted by the two-address pass.
  virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
                                     MachineInstr *UseMI,
                                     const TargetRegisterInfo &TRI) const {
    // Do nothing.
  }

  /// duplicate - Create a duplicate of the Orig instruction in MF. This is like
  /// MachineFunction::CloneMachineInstr(), but the target may update operands
  /// that are required to be unique.
  ///
  /// The instruction must be duplicable as indicated by isNotDuplicable().
  virtual MachineInstr *duplicate(MachineInstr *Orig,
                                  MachineFunction &MF) const = 0;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand.  This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  virtual MachineInstr *
  convertToThreeAddress(MachineFunction::iterator &MFI,
                   MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
    return 0;
  }

  /// commuteInstruction - If a target has any instructions that are
  /// commutable but require converting to different instructions or making
  /// non-trivial changes to commute them, this method can be overloaded to do
  /// that.  The default implementation simply swaps the commutable operands.
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.  Do not call this
  /// method for a non-commutable instruction; even for commutable instructions
  /// there may be cases where this method fails and returns null.
  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                           bool NewMI = false) const = 0;

  /// findCommutedOpIndices - If the specified MI is commutable, return the two
  /// operand indices whose values would be swapped. Return false if the
  /// instruction is not in a form which this routine understands.
  virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const = 0;
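
  // Illustrative sketch, not part of the interface: a pass that wants to
  // commute an instruction in place can combine these two hooks as follows
  // (TII and MI are assumed to be in scope):
  //
  //   unsigned Idx1, Idx2;
  //   if (MI->getDesc().isCommutable() &&
  //       TII->findCommutedOpIndices(MI, Idx1, Idx2)) {
  //     // NewMI defaults to false, so a non-null result is MI itself,
  //     // commuted in place; null means the commutation failed.
  //     MachineInstr *Res = TII->commuteInstruction(MI);
  //   }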

  /// produceSameValue - Return true if two machine instructions would produce
  /// identical values. By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI = 0) const = 0;

  /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target).  Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its succ)
  ///    just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and
  ///    fills Cond with a list of operands that evaluate the condition. These
  ///    operands can be passed to other TargetInstrInfo methods to create new
  ///    branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands in Cond that
  ///    evaluate the condition.  These operands can be passed to other
  ///    TargetInstrInfo methods to create new branches.
  ///
  /// Note that RemoveBranch and InsertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }
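
  // Illustrative sketch, not part of the interface: a client such as the
  // branch folder distinguishes the four cases above roughly like this
  // (TII and MBB are assumed to be in scope):
  //
  //   MachineBasicBlock *TBB = 0, *FBB = 0;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->AnalyzeBranch(MBB, TBB, FBB, Cond)) {
  //     if (!TBB) {
  //       // Case 1: the block falls through to its successor.
  //     } else if (Cond.empty()) {
  //       // Case 2: unconditional branch to TBB.
  //     } else if (!FBB) {
  //       // Case 3: conditional branch to TBB, otherwise fall through.
  //     } else {
  //       // Case 4: conditional branch to TBB, unconditional branch to FBB.
  //     }
  //   }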

  /// RemoveBranch - Remove the branching code at the end of the specified MBB.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
    assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
    return 0;
  }

  /// InsertBranch - Insert branch code into the end of the specified
  /// MachineBasicBlock.  The operands to this method are the same as those
  /// returned by AnalyzeBranch.  This is only invoked in cases where
  /// AnalyzeBranch returns success. It returns the number of instructions
  /// inserted.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where AnalyzeBranch doesn't apply because there was no original
  /// branch to analyze.  At least this much must be implemented, else tail
  /// merging needs to be disabled.
  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                DebugLoc DL) const {
    assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
    return 0;
  }
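
  // Illustrative sketch, not part of the interface: after a successful
  // AnalyzeBranch, a pass can rewrite the block's terminators by pairing
  // RemoveBranch with InsertBranch; here the condition is inverted with
  // ReverseBranchCondition, declared further below (TBB, FBB, Cond and DL
  // are assumed to come from the analyzed block):
  //
  //   TII->RemoveBranch(MBB);
  //   if (!TII->ReverseBranchCondition(Cond))   // false means success
  //     TII->InsertBranch(MBB, FBB, TBB, Cond, DL);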

  /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
  /// after it, replacing it with an unconditional branch to NewDest. This is
  /// used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const = 0;

  /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. the instruction would be the
  /// start of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// isProfitableToIfCvt - Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  virtual
  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                           unsigned ExtraPredCycles,
                           float Probability, float Confidence) const {
    return false;
  }

  /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
  /// checks for the case where two basic blocks from the true and false paths
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool
  isProfitableToIfCvt(MachineBasicBlock &TMBB,
                      unsigned NumTCycles, unsigned ExtraTCycles,
                      MachineBasicBlock &FMBB,
                      unsigned NumFCycles, unsigned ExtraFCycles,
                      float Probability, float Confidence) const {
    return false;
  }

  /// isProfitableToDupForIfCvt - Return true if it's profitable for the
  /// if-converter to duplicate instructions of specified accumulated
  /// instruction latencies in the specified MBB to enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool
  isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                            float Probability, float Confidence) const {
    return false;
  }

  /// copyPhysReg - Emit instructions to copy the physical register SrcReg into
  /// the physical register DestReg.
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
    assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!");
  }

  /// storeRegToStackSlot - Store the specified register of the given register
  /// class to the specified stack frame index. The store instruction is to be
  /// added to the given machine basic block before the specified machine
  /// instruction. If isKill is true, the register operand is the last use and
  /// must be marked kill.
  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const {
  assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
  }

  /// loadRegFromStackSlot - Load the specified register of the given register
  /// class from the specified stack frame index. The load instruction is to be
  /// added to the given machine basic block before the specified machine
  /// instruction.
  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const {
  assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
  }
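
  // Illustrative sketch, not part of the interface: spillers emit spill and
  // reload code by pairing these two hooks around the region where the value
  // lives on the stack (MBB, SpillPt, ReloadPt, Reg, FI, RC and TRI are
  // assumed to be in scope):
  //
  //   TII->storeRegToStackSlot(MBB, SpillPt, Reg, /*isKill=*/true, FI, RC, TRI);
  //   ...
  //   TII->loadRegFromStackSlot(MBB, ReloadPt, Reg, FI, RC, TRI);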

  /// emitFrameIndexDebugValue - Emit a target-dependent form of
  /// DBG_VALUE encoding the address of a frame index.  Addresses would
  /// normally be lowered the same way as other addresses on the target,
  /// e.g. in load instructions.  For targets that do not support this,
  /// the debug info is simply lost.
  /// If you add this for a target you should handle this DBG_VALUE in the
  /// target-specific AsmPrinter code as well; you will probably get invalid
  /// assembly output if you don't.
  virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
                                                 int FrameIx,
                                                 uint64_t Offset,
                                                 const MDNode *MDPtr,
                                                 DebugLoc dl) const {
    return 0;
  }

  /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
  /// slot into the specified machine instruction for the specified operand(s).
  /// If this is possible, a new instruction is returned with the specified
  /// operand folded, otherwise NULL is returned.
  /// The new instruction is inserted before MI, and the client is responsible
  /// for removing the old instruction.
  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
                                  const SmallVectorImpl<unsigned> &Ops,
                                  int FrameIndex) const;

  /// foldMemoryOperand - Same as the previous version except it allows folding
  /// of any load and store from / to any address, not just from a specific
  /// stack slot.
  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
                                  const SmallVectorImpl<unsigned> &Ops,
                                  MachineInstr* LoadMI) const;
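
  // Illustrative sketch, not part of the interface: a spiller that wants to
  // fold a reload from frame index FI directly into the use at operand
  // number OpNo might do the following (MI, FI and OpNo are assumed to be in
  // scope; MI is a MachineBasicBlock::iterator):
  //
  //   SmallVector<unsigned, 1> Ops;
  //   Ops.push_back(OpNo);
  //   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FI)) {
  //     // FoldedMI was inserted before MI; the caller removes the original.
  //     MI->eraseFromParent();
  //   }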

protected:
  /// foldMemoryOperandImpl - Target-dependent implementation for
  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                          MachineInstr* MI,
                                          const SmallVectorImpl<unsigned> &Ops,
                                          int FrameIndex) const {
    return 0;
  }

  /// foldMemoryOperandImpl - Target-dependent implementation for
  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr* MI,
                                          const SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr* LoadMI) const {
    return 0;
  }

public:
  /// canFoldMemoryOperand - Returns true for the specified load / store if
  /// folding is possible.
  virtual
  bool canFoldMemoryOperand(const MachineInstr *MI,
                            const SmallVectorImpl<unsigned> &Ops) const = 0;

  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store or a load and a store into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
    return false;
  }

  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                   SmallVectorImpl<SDNode*> &NewNodes) const {
    return false;
  }

  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the index of
  /// the operand that will hold the register containing the loaded value.
  virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex = 0) const {
    return 0;
  }

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                    int64_t &Offset1, int64_t &Offset2) const {
    return false;
  }

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }

  /// ReverseBranchCondition - Reverses the branch condition of the specified
  /// condition list, returning false on success and true if it cannot be
  /// reversed.
  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
    return true;
  }

  /// insertNoop - Insert a noop into the instruction stream at the specified
  /// point.
  virtual void insertNoop(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI) const;


  /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
  virtual void getNoopForMachoTarget(MCInst &NopInst) const {
    // Default to just using 'nop' string.
  }


  /// isPredicated - Returns true if the instruction is already predicated.
  ///
  virtual bool isPredicated(const MachineInstr *MI) const {
    return false;
  }

  /// isUnpredicatedTerminator - Returns true if the instruction is a
  /// terminator instruction that has not been predicated.
  virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;

  /// PredicateInstruction - Convert the instruction into a predicated
  /// instruction. It returns true if the operation was successful.
  virtual
  bool PredicateInstruction(MachineInstr *MI,
                        const SmallVectorImpl<MachineOperand> &Pred) const = 0;

  /// SubsumesPredicate - Returns true if the first specified predicate
  /// subsumes the second, e.g. GE subsumes GT.
  virtual
  bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                         const SmallVectorImpl<MachineOperand> &Pred2) const {
    return false;
  }

  /// DefinesPredicate - If the specified instruction defines any predicate
  /// or condition code register(s) used for predication, returns true as well
  /// as the definition predicate(s) by reference.
  virtual bool DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
    return false;
  }

  /// isPredicable - Return true if the specified instruction can be predicated.
  /// By default, this returns true for every instruction with a
  /// PredicateOperand.
  virtual bool isPredicable(MachineInstr *MI) const {
    return MI->getDesc().isPredicable();
  }
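
  // Illustrative sketch, not part of the interface: the if-converter
  // predicates an instruction roughly like this, with Cond coming from
  // AnalyzeBranch on the controlling block (TII, MI and Cond are assumed to
  // be in scope):
  //
  //   if (!TII->isPredicated(MI) && TII->isPredicable(MI))
  //     TII->PredicateInstruction(MI, Cond);   // returns true on success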

  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    return true;
  }

  /// isSchedulingBoundary - Test if the given instruction should be
  /// considered a scheduling boundary. This primarily includes labels and
  /// terminators.
  virtual bool isSchedulingBoundary(const MachineInstr *MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const = 0;

  /// Measure the specified inline asm to determine an approximation of its
  /// length.
  virtual unsigned getInlineAsmLength(const char *Str,
                                      const MCAsmInfo &MAI) const;

  /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer to
  /// use for this target when scheduling the machine instructions before
  /// register allocation.
  virtual ScheduleHazardRecognizer*
  CreateTargetHazardRecognizer(const TargetMachine *TM,
                               const ScheduleDAG *DAG) const = 0;

  /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
  /// recognizer to use for this target when scheduling the machine instructions
  /// after register allocation.
  virtual ScheduleHazardRecognizer*
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
                                     const ScheduleDAG *DAG) const = 0;

  /// AnalyzeCompare - For a comparison instruction, return the source register
  /// in SrcReg, and the mask and value it compares against in Mask and Value.
  /// Return true if the comparison instruction can be analyzed.
  virtual bool AnalyzeCompare(const MachineInstr *MI,
                              unsigned &SrcReg, int &Mask, int &Value) const {
    return false;
  }

  /// OptimizeCompareInstr - See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
                                    unsigned SrcReg, int Mask, int Value,
                                    const MachineRegisterInfo *MRI) const {
    return false;
  }
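
  // Illustrative sketch, not part of the interface: a peephole pass chains
  // these two hooks to eliminate a redundant compare (TII, CmpMI and MRI are
  // assumed to be in scope):
  //
  //   unsigned SrcReg; int Mask, Value;
  //   if (TII->AnalyzeCompare(CmpMI, SrcReg, Mask, Value))
  //     TII->OptimizeCompareInstr(CmpMI, SrcReg, Mask, Value, MRI);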

  /// FoldImmediate - 'Reg' is known to be defined by a move immediate
  /// instruction; try to fold the immediate into the use instruction.
  virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                             unsigned Reg, MachineRegisterInfo *MRI) const {
    return false;
  }

  /// getNumMicroOps - Return the number of micro-operations the given machine
  /// instruction will be decoded to on the target CPU.
  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr *MI) const;

  /// isZeroCost - Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }

  /// getOperandLatency - Compute and return the use operand latency of a given
  /// pair of def and use.
  /// In most cases, the static scheduling itinerary is enough to determine the
  /// operand latency, but it may not be possible for instructions with a
  /// variable number of defs / uses.
  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                              const MachineInstr *DefMI, unsigned DefIdx,
                              const MachineInstr *UseMI, unsigned UseIdx) const;

  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                SDNode *DefNode, unsigned DefIdx,
                                SDNode *UseNode, unsigned UseIdx) const;

  /// getInstrLatency - Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              const MachineInstr *MI,
                              unsigned *PredCost = 0) const;

  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              SDNode *Node) const;

  /// isHighLatencyDef - Return true if this opcode has high latency to its
  /// result.
  virtual bool isHighLatencyDef(int opc) const { return false; }

  /// hasHighOperandLatency - Compute the operand latency between a def of 'Reg'
  /// and a use in the current loop; return true if the target considers it
  /// 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even under
  /// high register pressure.
  virtual
  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI, unsigned UseIdx) const {
    return false;
  }

  /// hasLowDefLatency - Compute the operand latency of a def of 'Reg'; return
  /// true if the target considers it 'low'.
  virtual
  bool hasLowDefLatency(const InstrItineraryData *ItinData,
                        const MachineInstr *DefMI, unsigned DefIdx) const;
};

/// TargetInstrInfoImpl - This is the default implementation of
/// TargetInstrInfo, which just provides a couple of default implementations
/// for various methods.  This is separated out because it is implemented in
/// libcodegen, not in libtarget.
class TargetInstrInfoImpl : public TargetInstrInfo {
protected:
  TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
  : TargetInstrInfo(desc, NumOpcodes) {}
public:
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
                                       MachineBasicBlock *NewDest) const;
  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                           bool NewMI = false) const;
  virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;
  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops) const;
  virtual bool PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const;
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubReg,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const;
  virtual MachineInstr *duplicate(MachineInstr *Orig,
                                  MachineFunction &MF) const;
  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI) const;
  virtual bool isSchedulingBoundary(const MachineInstr *MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const;

  bool usePreRAHazardRecognizer() const;

  virtual ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetMachine*, const ScheduleDAG*) const;

  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
                                     const ScheduleDAG*) const;
};

} // End llvm namespace

#endif