1//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file Implements the ScheduleDAG class, which is used as the common base
11/// class for instruction schedulers. This encapsulates the scheduling DAG,
12/// which is shared between SelectionDAG and MachineInstr scheduling.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
17#define LLVM_CODEGEN_SCHEDULEDAG_H
18
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/GraphTraits.h"
21#include "llvm/ADT/PointerIntPair.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/iterator.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/Support/ErrorHandling.h"
26#include "llvm/Target/TargetLowering.h"
27#include <cassert>
28#include <cstddef>
29#include <iterator>
30#include <string>
31#include <vector>
32
33namespace llvm {
34
35template<class Graph> class GraphWriter;
36class MachineFunction;
37class MachineRegisterInfo;
38class MCInstrDesc;
39struct MCSchedClassDesc;
40class ScheduleDAG;
41class SDNode;
42class SUnit;
43class TargetInstrInfo;
44class TargetMachine;
45class TargetRegisterClass;
46class TargetRegisterInfo;
47
  /// Scheduling dependency. This represents one direction of an edge in the
  /// scheduling DAG.
  class SDep {
  public:
    /// These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only if
    // the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/Store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/Store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };

  private:
    /// \brief A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// A union discriminated by the dependence kind (stored in Dep's int).
    union {
      /// For Data, Anti, and Output dependencies, the associated register. For
      /// Data dependencies that don't currently have a register assigned, this
      /// is set to zero.
      unsigned Reg;

      /// Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// The time associated with this edge. Often this is just the value of the
    /// Latency field of the predecessor, however advanced models may provide
    /// additional information about specific edges.
    unsigned Latency;

  public:
    /// Constructs a null SDep. This is only for use by container classes which
    /// require default constructors. SUnits may not have null SDep edges.
    SDep() : Dep(nullptr, Data) {}

    /// Constructs an SDep with the specified values. \p Reg must be non-zero
    /// for Anti and Output kinds; for Data it may be zero (no register).
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }

    /// Constructs an Order dependence of the given \p kind with zero latency.
    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }

    /// Returns true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const;

    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// \brief Returns the latency value for this edge, which roughly means the
    /// minimum number of cycles that must elapse between the predecessor and
    /// the successor, given that they have this edge between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// Sets the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// Returns the SUnit to which this edge points.
    SUnit *getSUnit() const;

    /// Assigns the SUnit to which this edge points.
    void setSUnit(SUnit *SU);

    /// Returns an enum value representing the kind of the dependence.
    Kind getKind() const;

    /// Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// \brief Tests if this is an Order dependence between two memory accesses
    /// where both sides of the dependence access memory in non-volatile and
    /// fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// Tests if this is an Order dependence that is marked as a barrier.
    bool isBarrier() const {
      return getKind() == Order && Contents.OrdKind == Barrier;
    }

    /// Tests if this could be any kind of memory dependence.
    bool isNormalMemoryOrBarrier() const {
      return (isNormalMemory() || isBarrier());
    }

    /// \brief Tests if this is an Order dependence that is marked as
    /// "must alias", meaning that the SUnits at either end of the edge have a
    /// memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }

    /// Tests if this a weak dependence. Weak dependencies are considered DAG
    /// edges for height computation and other heuristics, but do not force
    /// ordering. Breaking a weak edge may require the scheduler to compensate,
    /// for example by inserting a copy.
    bool isWeak() const {
      // Relies on Weak and Cluster being the last OrderKind values (see the
      // ordering contract documented on OrderKind above).
      return getKind() == Order && Contents.OrdKind >= Weak;
    }

    /// \brief Tests if this is an Order dependence that is marked as
    /// "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// \brief Tests if this is an Order dependence that is marked as "cluster",
    /// meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// Tests if this is a Data dependence that is associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// Returns the register associated with this edge. This is only valid on
    /// Data, Anti, and Output edges. On Data edges, this value may be zero,
    /// meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// Assigns the associated register for this edge. This is only valid on
    /// Data, Anti, and Output edges. On Anti and Output edges, this value must
    /// not be zero. On Data edges, the value may be zero, which would mean that
    /// no specific register is associated with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }

    /// Prints this edge to \p O, using \p TRI (if provided) to render
    /// register names.
    raw_ostream &print(raw_ostream &O,
                       const TargetRegisterInfo *TRI = nullptr) const;
  };
242
  /// Mark SDep as POD-like so containers such as SmallVector may move and
  /// copy it with memcpy instead of element-wise copy construction.
  template <>
  struct isPodLike<SDep> { static const bool value = true; };
245
  /// Scheduling unit. This is a node in the scheduling DAG.
  class SUnit {
  private:
    /// Sentinel NodeNum value identifying boundary (placeholder) nodes.
    enum : unsigned { BoundaryID = ~0u };

    SDNode *Node = nullptr;        ///< Representative node.
    MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.

  public:
    SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
                               ///< was cloned. (SD scheduling only)

    const MCSchedClassDesc *SchedClass =
        nullptr; ///< nullptr or resolved SchedClass.

    SmallVector<SDep, 4> Preds;  ///< All SUnit predecessors.
    SmallVector<SDep, 4> Succs;  ///< All SUnit successors.

    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

    unsigned NodeNum = BoundaryID;     ///< Entry # of node in the node vector.
    unsigned NodeQueueId = 0;          ///< Queue id of node.
    unsigned NumPreds = 0;             ///< # of SDep::Data preds.
    unsigned NumSuccs = 0;             ///< # of SDep::Data succs.
    unsigned NumPredsLeft = 0;         ///< # of preds not scheduled.
    unsigned NumSuccsLeft = 0;         ///< # of succs not scheduled.
    unsigned WeakPredsLeft = 0;        ///< # of weak preds not scheduled.
    unsigned WeakSuccsLeft = 0;        ///< # of weak succs not scheduled.
    unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
    unsigned short Latency = 0;        ///< Node latency.
    bool isVRegCycle      : 1;         ///< May use and def the same vreg.
    bool isCall           : 1;         ///< Is a function call.
    bool isCallOp         : 1;         ///< Is a function call operand.
    bool isTwoAddress     : 1;         ///< Is a two-address instruction.
    bool isCommutable     : 1;         ///< Is a commutable instruction.
    bool hasPhysRegUses   : 1;         ///< Has physreg uses.
    bool hasPhysRegDefs   : 1;         ///< Has physreg defs that are being used.
    bool hasPhysRegClobbers : 1;       ///< Has any physreg defs, used or not.
    bool isPending        : 1;         ///< True once pending.
    bool isAvailable      : 1;         ///< True once available.
    bool isScheduled      : 1;         ///< True once scheduled.
    bool isScheduleHigh   : 1;         ///< True if preferable to schedule high.
    bool isScheduleLow    : 1;         ///< True if preferable to schedule low.
    bool isCloned         : 1;         ///< True if this node has been cloned.
    bool isUnbuffered     : 1;         ///< Uses an unbuffered resource.
    bool hasReservedResource : 1;      ///< Uses a reserved resource.
    Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.

  private:
    bool isDepthCurrent   : 1;         ///< True if Depth is current.
    bool isHeightCurrent  : 1;         ///< True if Height is current.
    unsigned Depth = 0;                ///< Node depth.
    unsigned Height = 0;               ///< Node height.

  public:
    unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
    unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.

    const TargetRegisterClass *CopyDstRC =
        nullptr; ///< Is a special copy node if != nullptr.
    const TargetRegisterClass *CopySrcRC = nullptr;

    /// \brief Constructs an SUnit for pre-regalloc scheduling to represent an
    /// SDNode and any nodes flagged to it.
    ///
    /// The bit-field flags have no in-class initializers, so every
    /// constructor must clear all of them explicitly.
    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

    /// \brief Constructs an SUnit for post-regalloc scheduling to represent a
    /// MachineInstr.
    SUnit(MachineInstr *instr, unsigned nodenum)
      : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

    /// \brief Constructs a placeholder SUnit. NodeNum keeps its BoundaryID
    /// default, so isBoundaryNode() is true for such units.
    SUnit()
      : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
        isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
        hasPhysRegClobbers(false), isPending(false), isAvailable(false),
        isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
        isCloned(false), isUnbuffered(false), hasReservedResource(false),
        isDepthCurrent(false), isHeightCurrent(false) {}

    /// \brief Boundary nodes are placeholders for the boundary of the
    /// scheduling region.
    ///
    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
    /// correspond to schedulable entities (e.g. instructions) and do not have a
    /// valid ID. Consequently, always check for boundary nodes before accessing
    /// an associative data structure keyed on node ID.
    bool isBoundaryNode() const { return NodeNum == BoundaryID; }

    /// Assigns the representative SDNode for this SUnit. This may be used
    /// during pre-regalloc scheduling.
    void setNode(SDNode *N) {
      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
      Node = N;
    }

    /// Returns the representative SDNode for this SUnit. This may be used
    /// during pre-regalloc scheduling.
    SDNode *getNode() const {
      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
      return Node;
    }

    /// \brief Returns true if this SUnit refers to a machine instruction as
    /// opposed to an SDNode.
    bool isInstr() const { return Instr; }

    /// Assigns the instruction for the SUnit. This may be used during
    /// post-regalloc scheduling.
    void setInstr(MachineInstr *MI) {
      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
      Instr = MI;
    }

    /// Returns the representative MachineInstr for this SUnit. This may be used
    /// during post-regalloc scheduling.
    MachineInstr *getInstr() const {
      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
      return Instr;
    }

    /// Adds the specified edge as a pred of the current node if not already.
    /// It also adds the current node as a successor of the specified node.
    bool addPred(const SDep &D, bool Required = true);

    /// \brief Adds a barrier edge to SU by calling addPred(), with latency 0
    /// generally or latency 1 for a store followed by a load.
    bool addPredBarrier(SUnit *SU) {
      SDep Dep(SU, SDep::Barrier);
      unsigned TrueMemOrderLatency =
        ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
      Dep.setLatency(TrueMemOrderLatency);
      return addPred(Dep);
    }

    /// Removes the specified edge as a pred of the current node if it exists.
    /// It also removes the current node as a successor of the specified node.
    void removePred(const SDep &D);

    /// Returns the depth of this node, which is the length of the maximum path
    /// up to any node which has no predecessors. Lazily recomputed when stale.
    unsigned getDepth() const {
      if (!isDepthCurrent)
        const_cast<SUnit *>(this)->ComputeDepth();
      return Depth;
    }

    /// \brief Returns the height of this node, which is the length of the
    /// maximum path down to any node which has no successors. Lazily
    /// recomputed when stale.
    unsigned getHeight() const {
      if (!isHeightCurrent)
        const_cast<SUnit *>(this)->ComputeHeight();
      return Height;
    }

    /// \brief If NewDepth is greater than this node's depth value, sets it to
    /// be the new depth value. This also recursively marks successor nodes
    /// dirty.
    void setDepthToAtLeast(unsigned NewDepth);

    /// \brief If NewDepth is greater than this node's depth value, set it to be
    /// the new height value. This also recursively marks predecessor nodes
    /// dirty.
    void setHeightToAtLeast(unsigned NewHeight);

    /// \brief Sets a flag in this node to indicate that its stored Depth value
    /// will require recomputation the next time getDepth() is called.
    void setDepthDirty();

    /// \brief Sets a flag in this node to indicate that its stored Height value
    /// will require recomputation the next time getHeight() is called.
    void setHeightDirty();

    /// Tests if node N is a predecessor of this node.
    bool isPred(const SUnit *N) const {
      for (const SDep &Pred : Preds)
        if (Pred.getSUnit() == N)
          return true;
      return false;
    }

    /// Tests if node N is a successor of this node.
    bool isSucc(const SUnit *N) const {
      for (const SDep &Succ : Succs)
        if (Succ.getSUnit() == N)
          return true;
      return false;
    }

    /// Returns true when all predecessors have been scheduled.
    bool isTopReady() const {
      return NumPredsLeft == 0;
    }
    /// Returns true when all successors have been scheduled.
    bool isBottomReady() const {
      return NumSuccsLeft == 0;
    }

    /// \brief Orders this node's predecessor edges such that the critical path
    /// edge occurs first.
    void biasCriticalPath();

    void dump(const ScheduleDAG *G) const;
    void dumpAll(const ScheduleDAG *G) const;
    raw_ostream &print(raw_ostream &O,
                       const SUnit *N = nullptr,
                       const SUnit *X = nullptr) const;
    raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;

  private:
    void ComputeDepth();
    void ComputeHeight();
  };
473
  /// Returns true if the specified SDep is equivalent except for latency.
  inline bool SDep::overlaps(const SDep &Other) const {
    if (Dep != Other.Dep)
      return false;
    // Same target SUnit and kind; compare the kind-specific payload.
    switch (Dep.getInt()) {
    case Data:
    case Anti:
    case Output:
      return Contents.Reg == Other.Contents.Reg;
    case Order:
      return Contents.OrdKind == Other.Contents.OrdKind;
    }
    llvm_unreachable("Invalid dependency kind!");
  }

  /// Returns the SUnit to which this edge points.
  inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }

  /// Assigns the SUnit to which this edge points.
  inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }

  /// Returns an enum value representing the kind of the dependence.
  inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
497
498  //===--------------------------------------------------------------------===//
499
500  /// \brief This interface is used to plug different priorities computation
501  /// algorithms into the list scheduler. It implements the interface of a
502  /// standard priority queue, where nodes are inserted in arbitrary order and
503  /// returned in priority order.  The computation of the priority and the
504  /// representation of the queue are totally up to the implementation to
505  /// decide.
506  class SchedulingPriorityQueue {
507    virtual void anchor();
508
509    unsigned CurCycle = 0;
510    bool HasReadyFilter;
511
512  public:
513    SchedulingPriorityQueue(bool rf = false) :  HasReadyFilter(rf) {}
514
515    virtual ~SchedulingPriorityQueue() = default;
516
517    virtual bool isBottomUp() const = 0;
518
519    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
520    virtual void addNode(const SUnit *SU) = 0;
521    virtual void updateNode(const SUnit *SU) = 0;
522    virtual void releaseState() = 0;
523
524    virtual bool empty() const = 0;
525
526    bool hasReadyFilter() const { return HasReadyFilter; }
527
528    virtual bool tracksRegPressure() const { return false; }
529
530    virtual bool isReady(SUnit *) const {
531      assert(!HasReadyFilter && "The ready filter must override isReady()");
532      return true;
533    }
534
535    virtual void push(SUnit *U) = 0;
536
537    void push_all(const std::vector<SUnit *> &Nodes) {
538      for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
539           E = Nodes.end(); I != E; ++I)
540        push(*I);
541    }
542
543    virtual SUnit *pop() = 0;
544
545    virtual void remove(SUnit *SU) = 0;
546
547    virtual void dump(ScheduleDAG *) const {}
548
549    /// As each node is scheduled, this method is invoked.  This allows the
550    /// priority function to adjust the priority of related unscheduled nodes,
551    /// for example.
552    virtual void scheduledNode(SUnit *) {}
553
554    virtual void unscheduledNode(SUnit *) {}
555
556    void setCurCycle(unsigned Cycle) {
557      CurCycle = Cycle;
558    }
559
560    unsigned getCurCycle() const {
561      return CurCycle;
562    }
563  };
564
  /// Common base class for instruction schedulers: owns the scheduling units
  /// for one region plus pointers to the target/function context, and provides
  /// the visualization and verification hooks shared by all schedulers.
  class ScheduleDAG {
  public:
    const TargetMachine &TM;            ///< Target processor
    const TargetInstrInfo *TII;         ///< Target instruction information
    const TargetRegisterInfo *TRI;      ///< Target processor register info
    MachineFunction &MF;                ///< Machine function
    MachineRegisterInfo &MRI;           ///< Virtual/real register map
    std::vector<SUnit> SUnits;          ///< The scheduling units.
    SUnit EntrySU;                      ///< Special node for the region entry.
    SUnit ExitSU;                       ///< Special node for the region exit.

#ifdef NDEBUG
    // In release builds StressSched is a compile-time false so the stress
    // paths fold away entirely.
    static const bool StressSched = false;
#else
    bool StressSched;
#endif

    explicit ScheduleDAG(MachineFunction &mf);

    virtual ~ScheduleDAG();

    /// Clears the DAG state (between regions).
    void clearDAG();

    /// Returns the MCInstrDesc of this SUnit.
    /// Returns NULL for SDNodes without a machine opcode.
    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
      if (SU->isInstr()) return &SU->getInstr()->getDesc();
      return getNodeDesc(SU->getNode());
    }

    /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
    virtual void viewGraph(const Twine &Name, const Twine &Title);
    virtual void viewGraph();

    virtual void dumpNode(const SUnit *SU) const = 0;

    /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

    /// Returns a label for the region of code covered by the DAG.
    virtual std::string getDAGName() const = 0;

    /// Adds custom features for a visualization of the ScheduleDAG.
    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
    /// \brief Verifies that all SUnits were scheduled and that their state is
    /// consistent. Returns the number of scheduled SUnits.
    unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

  private:
    /// Returns the MCInstrDesc of this SDNode or NULL.
    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
  };
621
622  class SUnitIterator : public std::iterator<std::forward_iterator_tag,
623                                             SUnit, ptrdiff_t> {
624    SUnit *Node;
625    unsigned Operand;
626
627    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
628
629  public:
630    bool operator==(const SUnitIterator& x) const {
631      return Operand == x.Operand;
632    }
633    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
634
635    pointer operator*() const {
636      return Node->Preds[Operand].getSUnit();
637    }
638    pointer operator->() const { return operator*(); }
639
640    SUnitIterator& operator++() {                // Preincrement
641      ++Operand;
642      return *this;
643    }
644    SUnitIterator operator++(int) { // Postincrement
645      SUnitIterator tmp = *this; ++*this; return tmp;
646    }
647
648    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
649    static SUnitIterator end  (SUnit *N) {
650      return SUnitIterator(N, (unsigned)N->Preds.size());
651    }
652
653    unsigned getOperand() const { return Operand; }
654    const SUnit *getNode() const { return Node; }
655
656    /// Tests if this is not an SDep::Data dependence.
657    bool isCtrlDep() const {
658      return getSDep().isCtrl();
659    }
660    bool isArtificialDep() const {
661      return getSDep().isArtificial();
662    }
663    const SDep &getSDep() const {
664      return Node->Preds[Operand];
665    }
666  };
667
  /// GraphTraits specialization allowing generic graph algorithms (and the
  /// GraphWriter) to walk an SUnit's predecessor edges.
  template <> struct GraphTraits<SUnit*> {
    typedef SUnit *NodeRef;
    typedef SUnitIterator ChildIteratorType;
    static NodeRef getEntryNode(SUnit *N) { return N; }
    static ChildIteratorType child_begin(NodeRef N) {
      return SUnitIterator::begin(N);
    }
    static ChildIteratorType child_end(NodeRef N) {
      return SUnitIterator::end(N);
    }
  };
679
  /// GraphTraits specialization exposing a whole ScheduleDAG as a graph whose
  /// node set is the DAG's SUnits vector.
  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.begin());
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.end());
    }
  };
689
  /// This class can compute a topological ordering for SUnits and provides
  /// methods for dynamically updating the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  class ScheduleDAGTopologicalSort {
    /// A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    SUnit *ExitSU;

    /// Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// A set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// Makes a DFS traversal and mark all nodes affected by the edge insertion.
    /// These nodes will later get new topological indexes by means of the Shift
    /// method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// \brief Reassigns topological indexes for the nodes in the DAG to
    /// preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Assigns the topological index to the node n.
    void Allocate(int n, int index);

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// Creates the initial topological ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// Returns an array of SUs that are both in the successor
    /// subtree of StartSU and in the predecessor subtree of TargetSU.
    /// StartSU and TargetSU are not in the array.
    /// Success is false if TargetSU is not in the successor subtree of
    /// StartSU, else it is true.
    std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
                                 bool &Success);

    /// Checks if \p SU is reachable from \p TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// Returns true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// \brief Updates the topological ordering to accommodate an edge to be
    /// added from SUnit \p X to SUnit \p Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// \brief Updates the topological ordering to accommodate an edge to be
    /// removed from the specified node \p N from the predecessors of the
    /// current node \p M.
    void RemovePred(SUnit *M, SUnit *N);

    // Iteration over the nodes in topological order.
    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
761
762} // end namespace llvm
763
764#endif // LLVM_CODEGEN_SCHEDULEDAG_H
765