ScheduleDAGRRList.cpp revision 59932584a812685b16ad6a53a023b2bb3fc21819
1//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements bottom-up and top-down register pressure reduction list
11// schedulers, using standard algorithms.  The basic approach uses a priority
12// queue of available nodes to schedule.  One at a time, nodes are taken from
13// the priority queue (thus in priority order), checked for legality to
14// schedule, and emitted if legal.
15//
16//===----------------------------------------------------------------------===//
17
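// For orientation only, a rough sketch of the driving loop that both the
// bottom-up and top-down schedulers below follow (the real loops also handle
// cycle bounds, live physical registers, backtracking and noop insertion):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();   // highest-priority available node
//     if (notYetLegalToSchedule(SU)) {     // hypothetical check, e.g. cycle bound
//       setAside(SU);                      // re-queued once it becomes ready
//       continue;
//     }
//     scheduleNode(SU);                    // emit it and release its neighbors
//   }
//
// notYetLegalToSchedule, setAside and scheduleNode are placeholder names used
// only in this sketch; they are not functions defined in this file.
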
18#define DEBUG_TYPE "pre-RA-sched"
19#include "llvm/CodeGen/ScheduleDAG.h"
20#include "llvm/CodeGen/SchedulerRegistry.h"
21#include "llvm/Target/TargetRegisterInfo.h"
22#include "llvm/Target/TargetData.h"
23#include "llvm/Target/TargetMachine.h"
24#include "llvm/Target/TargetInstrInfo.h"
25#include "llvm/Support/Debug.h"
26#include "llvm/Support/Compiler.h"
27#include "llvm/ADT/BitVector.h"
28#include "llvm/ADT/PriorityQueue.h"
29#include "llvm/ADT/SmallPtrSet.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/STLExtras.h"
33#include <climits>
34#include "llvm/Support/CommandLine.h"
35using namespace llvm;
36
37STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
38STATISTIC(NumUnfolds,    "Number of nodes unfolded");
39STATISTIC(NumDups,       "Number of duplicated nodes");
40STATISTIC(NumCCCopies,   "Number of cross class copies");
41
42static RegisterScheduler
43  burrListDAGScheduler("list-burr",
44                       "  Bottom-up register reduction list scheduling",
45                       createBURRListDAGScheduler);
46static RegisterScheduler
47  tdrListrDAGScheduler("list-tdrr",
48                       "  Top-down register reduction list scheduling",
49                       createTDRRListDAGScheduler);
50
51namespace {
52//===----------------------------------------------------------------------===//
53/// ScheduleDAGRRList - The actual register reduction list scheduler
54/// implementation.  This supports both top-down and bottom-up scheduling.
55///
56class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
57private:
58  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
59  /// it is top-down.
60  bool isBottomUp;
61
62  /// Fast - True if we are performing fast scheduling.
63  ///
64  bool Fast;
65
66  /// AvailableQueue - The priority queue to use for the available SUnits.
67  SchedulingPriorityQueue *AvailableQueue;
68
69  /// LiveRegs / LiveRegDefs - A set of physical registers and their defining
70  /// nodes that are "live". These nodes must be scheduled before any other
71  /// nodes that modify the registers can be scheduled.
72  SmallSet<unsigned, 4> LiveRegs;
73  std::vector<SUnit*> LiveRegDefs;
74  std::vector<unsigned> LiveRegCycles;
75
76public:
77  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
78                    const TargetMachine &tm, bool isbottomup, bool f,
79                    SchedulingPriorityQueue *availqueue)
80    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
81      AvailableQueue(availqueue) {
82    }
83
84  ~ScheduleDAGRRList() {
85    delete AvailableQueue;
86  }
87
88  void Schedule();
89
90  /// IsReachable - Checks if SU is reachable from TargetSU.
91  bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
92
93  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
94  /// create a cycle.
95  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
96
97  /// AddPred - This adds the specified node X as a predecessor of
98  /// the current node Y if it is not one already.
99  /// This returns true if this is a new predecessor.
100  /// Updates the topological ordering if required.
101  bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
102               unsigned PhyReg = 0, int Cost = 1);
103
104  /// RemovePred - This removes the specified node N from the predecessors of
105  /// the current node M. Updates the topological ordering if required.
106  bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);
107
108private:
109  void ReleasePred(SUnit*, bool, unsigned);
110  void ReleaseSucc(SUnit*, bool isChain, unsigned);
111  void CapturePred(SUnit*, SUnit*, bool);
112  void ScheduleNodeBottomUp(SUnit*, unsigned);
113  void ScheduleNodeTopDown(SUnit*, unsigned);
114  void UnscheduleNodeBottomUp(SUnit*);
115  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
116  SUnit *CopyAndMoveSuccessors(SUnit*);
117  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
118                                  const TargetRegisterClass*,
119                                  const TargetRegisterClass*,
120                                  SmallVector<SUnit*, 2>&);
121  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
122  void ListScheduleTopDown();
123  void ListScheduleBottomUp();
124  void CommuteNodesToReducePressure();
125
126
127  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
128  /// Updates the topological ordering if required.
129  SUnit *CreateNewSUnit(SDNode *N) {
130    SUnit *NewNode = NewSUnit(N);
131    // Update the topological ordering.
132    if (NewNode->NodeNum >= Node2Index.size())
133      InitDAGTopologicalSorting();
134    return NewNode;
135  }
136
137  /// CreateClone - Creates a new SUnit from an existing one.
138  /// Updates the topological ordering if required.
139  SUnit *CreateClone(SUnit *N) {
140    SUnit *NewNode = Clone(N);
141    // Update the topological ordering.
142    if (NewNode->NodeNum >= Node2Index.size())
143      InitDAGTopologicalSorting();
144    return NewNode;
145  }
146
147  /// Functions for preserving the topological ordering
148  /// even after dynamic insertions of new edges.
149  /// This allows a very fast implementation of IsReachable.
150
151  /// InitDAGTopologicalSorting - create the initial topological
152  /// ordering from the DAG to be scheduled.
153  void InitDAGTopologicalSorting();
154
155  /// DFS - make a DFS traversal and mark all nodes affected by the
156  /// edge insertion. These nodes will later get new topological indexes
157  /// by means of the Shift method.
158  void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
159
160  /// Shift - reassign topological indexes for the nodes in the DAG
161  /// to preserve the topological ordering.
162  void Shift(BitVector& Visited, int LowerBound, int UpperBound);
163
164  /// Allocate - assign the topological index to the node n.
165  void Allocate(int n, int index);
166
167  /// Index2Node - Maps topological index to the node number.
168  std::vector<int> Index2Node;
169  /// Node2Index - Maps the node number to its topological index.
170  std::vector<int> Node2Index;
171  /// Visited - a set of nodes visited during a DFS traversal.
172  BitVector Visited;
173};
174}  // end anonymous namespace
175
176
177/// Schedule - Schedule the DAG using list scheduling.
178void ScheduleDAGRRList::Schedule() {
179  DOUT << "********** List Scheduling **********\n";
180
181  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
182  LiveRegCycles.resize(TRI->getNumRegs(), 0);
183
184  // Build scheduling units.
185  BuildSchedUnits();
186
187  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
188          SUnits[su].dumpAll(&DAG));
189  if (!Fast) {
190    CalculateDepths();
191    CalculateHeights();
192  }
193  InitDAGTopologicalSorting();
194
195  AvailableQueue->initNodes(SUnits);
196
197  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
198  if (isBottomUp)
199    ListScheduleBottomUp();
200  else
201    ListScheduleTopDown();
202
203  AvailableQueue->releaseState();
204
205  if (!Fast)
206    CommuteNodesToReducePressure();
207}
208
209/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
210/// it is not the last use of its first operand, add it to the CommuteSet if
211/// possible. It will be commuted when it is translated to an MI.
212void ScheduleDAGRRList::CommuteNodesToReducePressure() {
213  SmallPtrSet<SUnit*, 4> OperandSeen;
214  for (unsigned i = Sequence.size(); i != 0; ) {
215    --i;
216    SUnit *SU = Sequence[i];
217    if (!SU || !SU->Node) continue;
218    if (SU->isCommutable) {
219      unsigned Opc = SU->Node->getMachineOpcode();
220      const TargetInstrDesc &TID = TII->get(Opc);
221      unsigned NumRes = TID.getNumDefs();
222      unsigned NumOps = TID.getNumOperands() - NumRes;
223      for (unsigned j = 0; j != NumOps; ++j) {
224        if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
225          continue;
226
227        SDNode *OpN = SU->Node->getOperand(j).getNode();
228        SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
229        if (OpSU && OperandSeen.count(OpSU) == 1) {
230          // Ok, so SU is not the last use of OpSU, but SU is two-address so
231          // it will clobber OpSU. Try to commute SU if no other source operands
232          // are live below.
233          bool DoCommute = true;
234          for (unsigned k = 0; k < NumOps; ++k) {
235            if (k != j) {
236              OpN = SU->Node->getOperand(k).getNode();
237              OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
238              if (OpSU && OperandSeen.count(OpSU) == 1) {
239                DoCommute = false;
240                break;
241              }
242            }
243          }
244          if (DoCommute)
245            CommuteSet.insert(SU->Node);
246        }
247
248        // Only look at the first use&def node for now.
249        break;
250      }
251    }
252
253    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
254         I != E; ++I) {
255      if (!I->isCtrl)
256        OperandSeen.insert(I->Dep->OrigNode);
257    }
258  }
259}
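
// Illustrative scenario (not code from this file): on a two-address target,
//   t1 = add t2, t3     // result is constrained to reuse operand 0's register
// clobbers t2's register. If t2 is still read by a later instruction in the
// final schedule but t3 is not, commuting the node so that t3 becomes the
// tied operand makes the clobbered register one whose live range ends here,
// avoiding a copy of t2. That is the case the loop above looks for.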
260
261//===----------------------------------------------------------------------===//
262//  Bottom-Up Scheduling
263//===----------------------------------------------------------------------===//
264
265/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
266/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
267void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
268                                    unsigned CurCycle) {
269  // FIXME: the distance between two nodes is not always == the predecessor's
270  // latency. For example, the reader can very well read the register written
271  // by the predecessor later than the issue cycle. It also depends on the
272  // interrupt model (drain vs. freeze).
273  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
274
275  --PredSU->NumSuccsLeft;
276
277#ifndef NDEBUG
278  if (PredSU->NumSuccsLeft < 0) {
279    cerr << "*** List scheduling failed! ***\n";
280    PredSU->dump(&DAG);
281    cerr << " has been released too many times!\n";
282    assert(0);
283  }
284#endif
285
286  if (PredSU->NumSuccsLeft == 0) {
287    PredSU->isAvailable = true;
288    AvailableQueue->push(PredSU);
289  }
290}
291
292/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
293/// count of its predecessors. If a predecessor's pending count is zero, add it to
294/// the Available queue.
295void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
296  DOUT << "*** Scheduling [" << CurCycle << "]: ";
297  DEBUG(SU->dump(&DAG));
298  SU->Cycle = CurCycle;
299
300  AvailableQueue->ScheduledNode(SU);
301
302  // Bottom up: release predecessors
303  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
304       I != E; ++I) {
305    ReleasePred(I->Dep, I->isCtrl, CurCycle);
306    if (I->Cost < 0)  {
307      // This is a physical register dependency and it's impossible or
308      // expensive to copy the register. Make sure nothing that can
309      // clobber the register is scheduled between the predecessor and
310      // this node.
311      if (LiveRegs.insert(I->Reg)) {
312        LiveRegDefs[I->Reg] = I->Dep;
313        LiveRegCycles[I->Reg] = CurCycle;
314      }
315    }
316  }
317
318  // Release all the implicit physical register defs that are live.
319  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
320       I != E; ++I) {
321    if (I->Cost < 0)  {
322      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
323        LiveRegs.erase(I->Reg);
324        assert(LiveRegDefs[I->Reg] == SU &&
325               "Physical register dependency violated?");
326        LiveRegDefs[I->Reg] = NULL;
327        LiveRegCycles[I->Reg] = 0;
328      }
329    }
330  }
331
332  SU->isScheduled = true;
333}
334
335/// CapturePred - This does the opposite of ReleasePred. Since SU is being
336/// unscheduled, increase the NumSuccsLeft count of its predecessors. Remove
337/// them from AvailableQueue if necessary.
338void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
339  unsigned CycleBound = 0;
340  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
341       I != E; ++I) {
342    if (I->Dep == SU)
343      continue;
344    CycleBound = std::max(CycleBound,
345                          I->Dep->Cycle + PredSU->Latency);
346  }
347
348  if (PredSU->isAvailable) {
349    PredSU->isAvailable = false;
350    if (!PredSU->isPending)
351      AvailableQueue->remove(PredSU);
352  }
353
354  PredSU->CycleBound = CycleBound;
355  ++PredSU->NumSuccsLeft;
356}
357
358/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
359/// state and its predecessors' states to reflect the change.
360void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
361  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
362  DEBUG(SU->dump(&DAG));
363
364  AvailableQueue->UnscheduledNode(SU);
365
366  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
367       I != E; ++I) {
368    CapturePred(I->Dep, SU, I->isCtrl);
369    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg])  {
370      LiveRegs.erase(I->Reg);
371      assert(LiveRegDefs[I->Reg] == I->Dep &&
372             "Physical register dependency violated?");
373      LiveRegDefs[I->Reg] = NULL;
374      LiveRegCycles[I->Reg] = 0;
375    }
376  }
377
378  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
379       I != E; ++I) {
380    if (I->Cost < 0)  {
381      if (LiveRegs.insert(I->Reg)) {
382        assert(!LiveRegDefs[I->Reg] &&
383               "Physical register dependency violated?");
384        LiveRegDefs[I->Reg] = SU;
385      }
386      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
387        LiveRegCycles[I->Reg] = I->Dep->Cycle;
388    }
389  }
390
391  SU->Cycle = 0;
392  SU->isScheduled = false;
393  SU->isAvailable = true;
394  AvailableQueue->push(SU);
395}
396
397/// IsReachable - Checks if SU is reachable from TargetSU.
398bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
399  // If insertion of the edge SU->TargetSU would create a cycle
400  // then there is a path from TargetSU to SU.
401  int UpperBound, LowerBound;
402  LowerBound = Node2Index[TargetSU->NodeNum];
403  UpperBound = Node2Index[SU->NodeNum];
404  bool HasLoop = false;
405  // Is Ord(TargetSU) < Ord(SU) ?
406  if (LowerBound < UpperBound) {
407    Visited.reset();
408    // There may be a path from TargetSU to SU. Check for it.
409    DFS(TargetSU, UpperBound, HasLoop);
410  }
411  return HasLoop;
412}
413
414/// Allocate - assign the topological index to the node n.
415inline void ScheduleDAGRRList::Allocate(int n, int index) {
416  Node2Index[n] = index;
417  Index2Node[index] = n;
418}
419
420/// InitDAGTopologicalSorting - create the initial topological
421/// ordering from the DAG to be scheduled.
422
423/// The idea of the algorithm is taken from
424/// "Online algorithms for managing the topological order of
425/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
426/// This is the MNR algorithm, which was first introduced by
427/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
428/// "Maintaining a topological order under edge insertions".
429///
430/// Short description of the algorithm:
431///
432/// Topological ordering, ord, of a DAG maps each node to a topological
433/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
434///
435/// This means that if there is a path from the node X to the node Z,
436/// then ord(X) < ord(Z).
437///
438/// This property can be used to check for reachability of nodes:
439/// if Z is reachable from X, then an insertion of the edge Z->X would
440/// create a cycle.
441///
442/// The algorithm first computes a topological ordering for the DAG by
443/// initializing the Index2Node and Node2Index arrays and then tries to keep
444/// the ordering up-to-date after edge insertions by reordering the DAG.
445///
446/// On insertion of the edge X->Y, the algorithm first marks by calling DFS
447/// the nodes reachable from Y, and then shifts them using Shift to lie
448/// immediately after X in Index2Node.
449void ScheduleDAGRRList::InitDAGTopologicalSorting() {
450  unsigned DAGSize = SUnits.size();
451  std::vector<SUnit*> WorkList;
452  WorkList.reserve(DAGSize);
453
454  Index2Node.resize(DAGSize);
455  Node2Index.resize(DAGSize);
456
457  // Initialize the data structures.
458  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
459    SUnit *SU = &SUnits[i];
460    int NodeNum = SU->NodeNum;
461    unsigned Degree = SU->Succs.size();
462    // Temporarily use the Node2Index array as scratch space for degree counts.
463    Node2Index[NodeNum] = Degree;
464
465    // Is it a node without dependencies?
466    if (Degree == 0) {
467      assert(SU->Succs.empty() && "SUnit should have no successors");
468      // Collect leaf nodes.
469      WorkList.push_back(SU);
470    }
471  }
472
473  int Id = DAGSize;
474  while (!WorkList.empty()) {
475    SUnit *SU = WorkList.back();
476    WorkList.pop_back();
477    Allocate(SU->NodeNum, --Id);
478    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
479         I != E; ++I) {
480      SUnit *SU = I->Dep;
481      if (!--Node2Index[SU->NodeNum])
482        // If all dependencies of the node are processed already,
483        // then the node can be computed now.
484        WorkList.push_back(SU);
485    }
486  }
487
488  Visited.resize(DAGSize);
489
490#ifndef NDEBUG
491  // Check correctness of the ordering
492  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
493    SUnit *SU = &SUnits[i];
494    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
495         I != E; ++I) {
496       assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
497       "Wrong topological sorting");
498    }
499  }
500#endif
501}
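
// Illustrative example (not code): suppose four SUnits currently have
// Node2Index values ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3 and B has an edge
// to C. Making D a predecessor of B (AddPred(B, D)) adds the edge D->B, but
// ord(D) > ord(B), so the order is stale. DFS starting at B marks the nodes
// reachable from B whose index is below ord(D) (here B and C), and Shift then
// compacts the unmarked nodes in the window down and appends the marked ones
// after them, giving ord(A)=0, ord(D)=1, ord(B)=2, ord(C)=3. Had the DFS hit
// a node with index equal to ord(D), the new edge would have closed a cycle,
// which is exactly what IsReachable/WillCreateCycle report.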
502
503/// AddPred - adds an edge from SUnit X to SUnit Y.
504/// Updates the topological ordering if required.
505bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
506                 unsigned PhyReg, int Cost) {
507  int UpperBound, LowerBound;
508  LowerBound = Node2Index[Y->NodeNum];
509  UpperBound = Node2Index[X->NodeNum];
510  bool HasLoop = false;
511  // Is Ord(X) < Ord(Y) ?
512  if (LowerBound < UpperBound) {
513    // Update the topological order.
514    Visited.reset();
515    DFS(Y, UpperBound, HasLoop);
516    assert(!HasLoop && "Inserted edge creates a loop!");
517    // Recompute topological indexes.
518    Shift(Visited, LowerBound, UpperBound);
519  }
520  // Now really insert the edge.
521  return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
522}
523
524/// RemovePred - This removes the specified node N from the predecessors of
525/// the current node M. Updates the topological ordering if required.
526bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
527                                   bool isCtrl, bool isSpecial) {
528  // InitDAGTopologicalSorting();
529  return M->removePred(N, isCtrl, isSpecial);
530}
531
532/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
533/// all nodes affected by the edge insertion. These nodes will later get new
534/// topological indexes by means of the Shift method.
535void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
536  std::vector<const SUnit*> WorkList;
537  WorkList.reserve(SUnits.size());
538
539  WorkList.push_back(SU);
540  while (!WorkList.empty()) {
541    SU = WorkList.back();
542    WorkList.pop_back();
543    Visited.set(SU->NodeNum);
544    for (int I = SU->Succs.size()-1; I >= 0; --I) {
545      int s = SU->Succs[I].Dep->NodeNum;
546      if (Node2Index[s] == UpperBound) {
547        HasLoop = true;
548        return;
549      }
550      // Visit successors if not already and in affected region.
551      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
552        WorkList.push_back(SU->Succs[I].Dep);
553      }
554    }
555  }
556}
557
558/// Shift - Renumber the nodes so that the topological ordering is
559/// preserved.
560void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
561                              int UpperBound) {
562  std::vector<int> L;
563  int shift = 0;
564  int i;
565
566  for (i = LowerBound; i <= UpperBound; ++i) {
567    // w is node at topological index i.
568    int w = Index2Node[i];
569    if (Visited.test(w)) {
570      // Unmark.
571      Visited.reset(w);
572      L.push_back(w);
573      shift = shift + 1;
574    } else {
575      Allocate(w, i - shift);
576    }
577  }
578
579  for (unsigned j = 0; j < L.size(); ++j) {
580    Allocate(L[j], i - shift);
581    i = i + 1;
582  }
583}
584
585
586/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
587/// create a cycle.
588bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
589  if (IsReachable(TargetSU, SU))
590    return true;
591  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
592       I != E; ++I)
593    if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
594      return true;
595  return false;
596}
597
598/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
599/// BtCycle in order to schedule a specific node. Clears SU->isAvailable if one
600/// of SU's successors is unscheduled in the process.
601void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
602                                          unsigned &CurCycle) {
603  SUnit *OldSU = NULL;
604  while (CurCycle > BtCycle) {
605    OldSU = Sequence.back();
606    Sequence.pop_back();
607    if (SU->isSucc(OldSU))
608      // Don't try to remove SU from AvailableQueue.
609      SU->isAvailable = false;
610    UnscheduleNodeBottomUp(OldSU);
611    --CurCycle;
612  }
613
614
615  if (SU->isSucc(OldSU)) {
616    assert(false && "Something is wrong!");
617    abort();
618  }
619
620  ++NumBacktracks;
621}
622
623/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
624/// successors to the newly created node.
625SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
626  if (SU->FlaggedNodes.size())
627    return NULL;
628
629  SDNode *N = SU->Node;
630  if (!N)
631    return NULL;
632
633  SUnit *NewSU;
634  bool TryUnfold = false;
635  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
636    MVT VT = N->getValueType(i);
637    if (VT == MVT::Flag)
638      return NULL;
639    else if (VT == MVT::Other)
640      TryUnfold = true;
641  }
642  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
643    const SDValue &Op = N->getOperand(i);
644    MVT VT = Op.getNode()->getValueType(Op.getResNo());
645    if (VT == MVT::Flag)
646      return NULL;
647  }
648
649  if (TryUnfold) {
650    SmallVector<SDNode*, 2> NewNodes;
651    if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
652      return NULL;
653
654    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
655    assert(NewNodes.size() == 2 && "Expected a load folding node!");
656
657    N = NewNodes[1];
658    SDNode *LoadNode = NewNodes[0];
659    unsigned NumVals = N->getNumValues();
660    unsigned OldNumVals = SU->Node->getNumValues();
661    for (unsigned i = 0; i != NumVals; ++i)
662      DAG.ReplaceAllUsesOfValueWith(SDValue(SU->Node, i), SDValue(N, i));
663    DAG.ReplaceAllUsesOfValueWith(SDValue(SU->Node, OldNumVals-1),
664                                  SDValue(LoadNode, 1));
665
666    SUnit *NewSU = CreateNewSUnit(N);
667    assert(N->getNodeId() == -1 && "Node already inserted!");
668    N->setNodeId(NewSU->NodeNum);
669
670    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
671    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
672      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
673        NewSU->isTwoAddress = true;
674        break;
675      }
676    }
677    if (TID.isCommutable())
678      NewSU->isCommutable = true;
679    // FIXME: Calculate height / depth and propagate the changes?
680    NewSU->Depth = SU->Depth;
681    NewSU->Height = SU->Height;
682    ComputeLatency(NewSU);
683
684    // LoadNode may already exist. This can happen when there is another
685    // load from the same location that produces the same type of value
686    // but has different alignment or volatility.
687    bool isNewLoad = true;
688    SUnit *LoadSU;
689    if (LoadNode->getNodeId() != -1) {
690      LoadSU = &SUnits[LoadNode->getNodeId()];
691      isNewLoad = false;
692    } else {
693      LoadSU = CreateNewSUnit(LoadNode);
694      LoadNode->setNodeId(LoadSU->NodeNum);
695
696      LoadSU->Depth = SU->Depth;
697      LoadSU->Height = SU->Height;
698      ComputeLatency(LoadSU);
699    }
700
701    SUnit *ChainPred = NULL;
702    SmallVector<SDep, 4> ChainSuccs;
703    SmallVector<SDep, 4> LoadPreds;
704    SmallVector<SDep, 4> NodePreds;
705    SmallVector<SDep, 4> NodeSuccs;
706    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
707         I != E; ++I) {
708      if (I->isCtrl)
709        ChainPred = I->Dep;
710      else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
711        LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
712      else
713        NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
714    }
715    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
716         I != E; ++I) {
717      if (I->isCtrl)
718        ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
719                                  I->isCtrl, I->isSpecial));
720      else
721        NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
722                                 I->isCtrl, I->isSpecial));
723    }
724
725    if (ChainPred) {
726      RemovePred(SU, ChainPred, true, false);
727      if (isNewLoad)
728        AddPred(LoadSU, ChainPred, true, false);
729    }
730    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
731      SDep *Pred = &LoadPreds[i];
732      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
733      if (isNewLoad) {
734        AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
735                Pred->Reg, Pred->Cost);
736      }
737    }
738    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
739      SDep *Pred = &NodePreds[i];
740      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
741      AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
742              Pred->Reg, Pred->Cost);
743    }
744    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
745      SDep *Succ = &NodeSuccs[i];
746      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
747      AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
748              Succ->Reg, Succ->Cost);
749    }
750    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
751      SDep *Succ = &ChainSuccs[i];
752      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
753      if (isNewLoad) {
754        AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
755                Succ->Reg, Succ->Cost);
756      }
757    }
758    if (isNewLoad) {
759      AddPred(NewSU, LoadSU, false, false);
760    }
761
762    if (isNewLoad)
763      AvailableQueue->addNode(LoadSU);
764    AvailableQueue->addNode(NewSU);
765
766    ++NumUnfolds;
767
768    if (NewSU->NumSuccsLeft == 0) {
769      NewSU->isAvailable = true;
770      return NewSU;
771    }
772    SU = NewSU;
773  }
774
775  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
776  NewSU = CreateClone(SU);
777
778  // New SUnit has the exact same predecessors.
779  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
780       I != E; ++I)
781    if (!I->isSpecial) {
782      AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
783      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
784    }
785
786  // Only copy scheduled successors. Cut them from old node's successor
787  // list and move them over.
788  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
789  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
790       I != E; ++I) {
791    if (I->isSpecial)
792      continue;
793    if (I->Dep->isScheduled) {
794      NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
795      AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
796      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
797    }
798  }
799  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
800    SUnit *Succ = DelDeps[i].first;
801    bool isCtrl = DelDeps[i].second;
802    RemovePred(Succ, SU, isCtrl, false);
803  }
804
805  AvailableQueue->updateNode(SU);
806  AvailableQueue->addNode(NewSU);
807
808  ++NumDups;
809  return NewSU;
810}
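
// Illustrative picture (not code): when SU is a load-folded operation such as
// "r = op [mem]", unfoldMemoryOperand splits it into an independent load that
// feeds the operation:
//   LoadSU:  t = load [mem]
//   NewSU:   r = op t
// Chain predecessors and chain successors are rewired to LoadSU, value edges
// to NewSU, and NewSU gains LoadSU as a predecessor. If unfolding is not
// possible, SU is simply cloned and its already-scheduled successors are
// moved onto the clone.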
811
812/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
813/// and move all scheduled successors of the given SUnit to the last copy.
814void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
815                                              const TargetRegisterClass *DestRC,
816                                              const TargetRegisterClass *SrcRC,
817                                               SmallVector<SUnit*, 2> &Copies) {
818  SUnit *CopyFromSU = CreateNewSUnit(NULL);
819  CopyFromSU->CopySrcRC = SrcRC;
820  CopyFromSU->CopyDstRC = DestRC;
821  CopyFromSU->Depth = SU->Depth;
822  CopyFromSU->Height = SU->Height;
823
824  SUnit *CopyToSU = CreateNewSUnit(NULL);
825  CopyToSU->CopySrcRC = DestRC;
826  CopyToSU->CopyDstRC = SrcRC;
827
828  // Only copy scheduled successors. Cut them from old node's successor
829  // list and move them over.
830  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
831  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
832       I != E; ++I) {
833    if (I->isSpecial)
834      continue;
835    if (I->Dep->isScheduled) {
836      CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
837      AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
838      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
839    }
840  }
841  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
842    SUnit *Succ = DelDeps[i].first;
843    bool isCtrl = DelDeps[i].second;
844    RemovePred(Succ, SU, isCtrl, false);
845  }
846
847  AddPred(CopyFromSU, SU, false, false, Reg, -1);
848  AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
849
850  AvailableQueue->updateNode(SU);
851  AvailableQueue->addNode(CopyFromSU);
852  AvailableQueue->addNode(CopyToSU);
853  Copies.push_back(CopyFromSU);
854  Copies.push_back(CopyToSU);
855
856  ++NumCCCopies;
857}
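
// Illustrative picture (not code): for a def of physical register R whose
// value is expensive to keep live, the two inserted copies form the chain
//   SU --> CopyFromSU --> CopyToSU --> (already-scheduled successors)
// i.e. the value is copied out of R into a register of the cross-copy class
// and then copied back, and the scheduled uses are moved onto the final copy
// so that the physical-register dependency on R is broken.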
858
859/// getPhysicalRegisterVT - Returns the ValueType of the physical register
860/// definition of the specified node.
861/// FIXME: Move to SelectionDAG?
862static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
863                                 const TargetInstrInfo *TII) {
864  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
865  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
866  unsigned NumRes = TID.getNumDefs();
867  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
868    if (Reg == *ImpDef)
869      break;
870    ++NumRes;
871  }
872  return N->getValueType(NumRes);
873}
874
875/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
876/// scheduling of the given node to satisfy live physical register dependencies.
877/// If the specific node is the last one that's available to schedule, do
878/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
879bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
880                                                 SmallVector<unsigned, 4> &LRegs){
881  if (LiveRegs.empty())
882    return false;
883
884  SmallSet<unsigned, 4> RegAdded;
885  // If this node would clobber any "live" register, then it's not ready.
886  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
887       I != E; ++I) {
888    if (I->Cost < 0)  {
889      unsigned Reg = I->Reg;
890      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
891        if (RegAdded.insert(Reg))
892          LRegs.push_back(Reg);
893      }
894      for (const unsigned *Alias = TRI->getAliasSet(Reg);
895           *Alias; ++Alias)
896        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
897          if (RegAdded.insert(*Alias))
898            LRegs.push_back(*Alias);
899        }
900    }
901  }
902
903  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
904    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
905    if (!Node || !Node->isMachineOpcode())
906      continue;
907    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
908    if (!TID.ImplicitDefs)
909      continue;
910    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
911      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
912        if (RegAdded.insert(*Reg))
913          LRegs.push_back(*Reg);
914      }
915      for (const unsigned *Alias = TRI->getAliasSet(*Reg);
916           *Alias; ++Alias)
917        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
918          if (RegAdded.insert(*Alias))
919            LRegs.push_back(*Alias);
920        }
921    }
922  }
923  return !LRegs.empty();
924}
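
// Illustrative scenario (not code): scheduling is bottom-up, so a register in
// LiveRegs is one whose value is still needed by an already-emitted (later in
// program order) node, with LiveRegDefs recording which SUnit must produce it.
// If the candidate SU (or one of its flagged nodes) would define or clobber
// such a register without being that recorded producer, emitting SU now would
// place a clobber between the real def and its use, so the register is listed
// in LRegs and the caller delays SU.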
925
926
927/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
928/// schedulers.
929void ScheduleDAGRRList::ListScheduleBottomUp() {
930  unsigned CurCycle = 0;
931  // Add root to Available queue.
932  if (!SUnits.empty()) {
933    SUnit *RootSU = &SUnits[DAG.getRoot().getNode()->getNodeId()];
934    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
935    RootSU->isAvailable = true;
936    AvailableQueue->push(RootSU);
937  }
938
939  // While Available queue is not empty, grab the node with the highest
940  // priority. If it is not ready, put it back. Schedule the node.
941  SmallVector<SUnit*, 4> NotReady;
942  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
943  Sequence.reserve(SUnits.size());
944  while (!AvailableQueue->empty()) {
945    bool Delayed = false;
946    LRegsMap.clear();
947    SUnit *CurSU = AvailableQueue->pop();
948    while (CurSU) {
949      if (CurSU->CycleBound <= CurCycle) {
950        SmallVector<unsigned, 4> LRegs;
951        if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
952          break;
953        Delayed = true;
954        LRegsMap.insert(std::make_pair(CurSU, LRegs));
955      }
956
957      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
958      NotReady.push_back(CurSU);
959      CurSU = AvailableQueue->pop();
960    }
961
962    // All candidates are delayed due to live physical reg dependencies.
963    // Try backtracking, code duplication, or inserting cross class copies
964    // to resolve it.
965    if (Delayed && !CurSU) {
966      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
967        SUnit *TrySU = NotReady[i];
968        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
969
970        // Try unscheduling up to the point where it's safe to schedule
971        // this node.
972        unsigned LiveCycle = CurCycle;
973        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
974          unsigned Reg = LRegs[j];
975          unsigned LCycle = LiveRegCycles[Reg];
976          LiveCycle = std::min(LiveCycle, LCycle);
977        }
978        SUnit *OldSU = Sequence[LiveCycle];
979        if (!WillCreateCycle(TrySU, OldSU))  {
980          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
981          // Force the current node to be scheduled before the node that
982          // requires the physical reg dep.
983          if (OldSU->isAvailable) {
984            OldSU->isAvailable = false;
985            AvailableQueue->remove(OldSU);
986          }
987          AddPred(TrySU, OldSU, true, true);
988          // If one or more successors have been unscheduled, then the current
989          // node is no longer available. Schedule a successor that's now
990          // available instead.
991          if (!TrySU->isAvailable)
992            CurSU = AvailableQueue->pop();
993          else {
994            CurSU = TrySU;
995            TrySU->isPending = false;
996            NotReady.erase(NotReady.begin()+i);
997          }
998          break;
999        }
1000      }
1001
1002      if (!CurSU) {
1003        // Can't backtrack. Try duplicating the nodes that produce these
1004        // "expensive to copy" values to break the dependency. If even
1005        // that doesn't work, insert cross class copies.
1006        SUnit *TrySU = NotReady[0];
1007        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1008        assert(LRegs.size() == 1 && "Can't handle this yet!");
1009        unsigned Reg = LRegs[0];
1010        SUnit *LRDef = LiveRegDefs[Reg];
1011        SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
1012        if (!NewDef) {
1013          // Issue expensive cross register class copies.
1014          MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
1015          const TargetRegisterClass *RC =
1016            TRI->getPhysicalRegisterRegClass(Reg, VT);
1017          const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1018          if (!DestRC) {
1019            assert(false && "Don't know how to copy this physical register!");
1020            abort();
1021          }
1022          SmallVector<SUnit*, 2> Copies;
1023          InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1024          DOUT << "Adding an edge from SU # " << TrySU->NodeNum
1025               << " to SU #" << Copies.front()->NodeNum << "\n";
1026          AddPred(TrySU, Copies.front(), true, true);
1027          NewDef = Copies.back();
1028        }
1029
1030        DOUT << "Adding an edge from SU # " << NewDef->NodeNum
1031             << " to SU #" << TrySU->NodeNum << "\n";
1032        LiveRegDefs[Reg] = NewDef;
1033        AddPred(NewDef, TrySU, true, true);
1034        TrySU->isAvailable = false;
1035        CurSU = NewDef;
1036      }
1037
1038      if (!CurSU) {
1039        assert(false && "Unable to resolve live physical register dependencies!");
1040        abort();
1041      }
1042    }
1043
1044    // Add the nodes that aren't ready back onto the available list.
1045    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
1046      NotReady[i]->isPending = false;
1047      // May no longer be available due to backtracking.
1048      if (NotReady[i]->isAvailable)
1049        AvailableQueue->push(NotReady[i]);
1050    }
1051    NotReady.clear();
1052
1053    if (!CurSU)
1054      Sequence.push_back(0);
1055    else {
1056      ScheduleNodeBottomUp(CurSU, CurCycle);
1057      Sequence.push_back(CurSU);
1058    }
1059    ++CurCycle;
1060  }
1061
1062  // Reverse the order since this is a bottom-up schedule.
1063  std::reverse(Sequence.begin(), Sequence.end());
1064
1065
1066#ifndef NDEBUG
1067  // Verify that all SUnits were scheduled.
1068  bool AnyNotSched = false;
1069  unsigned DeadNodes = 0;
1070  unsigned Noops = 0;
1071  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1072    if (!SUnits[i].isScheduled) {
1073      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1074        ++DeadNodes;
1075        continue;
1076      }
1077      if (!AnyNotSched)
1078        cerr << "*** List scheduling failed! ***\n";
1079      SUnits[i].dump(&DAG);
1080      cerr << "has not been scheduled!\n";
1081      AnyNotSched = true;
1082    }
1083    if (SUnits[i].NumSuccsLeft != 0) {
1084      if (!AnyNotSched)
1085        cerr << "*** List scheduling failed! ***\n";
1086      SUnits[i].dump(&DAG);
1087      cerr << "has successors left!\n";
1088      AnyNotSched = true;
1089    }
1090  }
1091  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1092    if (!Sequence[i])
1093      ++Noops;
1094  assert(!AnyNotSched);
1095  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1096         "The number of nodes scheduled doesn't match the expected number!");
1097#endif
1098}
1099
1100//===----------------------------------------------------------------------===//
1101//  Top-Down Scheduling
1102//===----------------------------------------------------------------------===//
1103
1104/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1105/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
1106void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
1107                                    unsigned CurCycle) {
1108  // FIXME: the distance between two nodes is not always == the predecessor's
1109  // latency. For example, the reader can very well read the register written
1110  // by the predecessor later than the issue cycle. It also depends on the
1111  // interrupt model (drain vs. freeze).
1112  SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
1113
1114  --SuccSU->NumPredsLeft;
1115
1116#ifndef NDEBUG
1117  if (SuccSU->NumPredsLeft < 0) {
1118    cerr << "*** List scheduling failed! ***\n";
1119    SuccSU->dump(&DAG);
1120    cerr << " has been released too many times!\n";
1121    assert(0);
1122  }
1123#endif
1124
1125  if (SuccSU->NumPredsLeft == 0) {
1126    SuccSU->isAvailable = true;
1127    AvailableQueue->push(SuccSU);
1128  }
1129}
1130
1131
1132/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1133/// count of its successors. If a successor's pending count is zero, add it to
1134/// the Available queue.
1135void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
1136  DOUT << "*** Scheduling [" << CurCycle << "]: ";
1137  DEBUG(SU->dump(&DAG));
1138  SU->Cycle = CurCycle;
1139
1140  AvailableQueue->ScheduledNode(SU);
1141
1142  // Top down: release successors
1143  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1144       I != E; ++I)
1145    ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
1146  SU->isScheduled = true;
1147}
1148
1149/// ListScheduleTopDown - The main loop of list scheduling for top-down
1150/// schedulers.
1151void ScheduleDAGRRList::ListScheduleTopDown() {
1152  unsigned CurCycle = 0;
1153
1154  // All leaves to Available queue.
1155  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1156    // It is available if it has no predecessors.
1157    if (SUnits[i].Preds.empty()) {
1158      AvailableQueue->push(&SUnits[i]);
1159      SUnits[i].isAvailable = true;
1160    }
1161  }
1162
1163  // While Available queue is not empty, grab the node with the highest
1164  // priority. If it is not ready, put it back. Schedule the node.
1165  std::vector<SUnit*> NotReady;
1166  Sequence.reserve(SUnits.size());
1167  while (!AvailableQueue->empty()) {
1168    SUnit *CurSU = AvailableQueue->pop();
1169    while (CurSU && CurSU->CycleBound > CurCycle) {
1170      NotReady.push_back(CurSU);
1171      CurSU = AvailableQueue->pop();
1172    }
1173
1174    // Add the nodes that aren't ready back onto the available list.
1175    AvailableQueue->push_all(NotReady);
1176    NotReady.clear();
1177
1178    if (!CurSU)
1179      Sequence.push_back(0);
1180    else {
1181      ScheduleNodeTopDown(CurSU, CurCycle);
1182      Sequence.push_back(CurSU);
1183    }
1184    ++CurCycle;
1185  }
1186
1187
1188#ifndef NDEBUG
1189  // Verify that all SUnits were scheduled.
1190  bool AnyNotSched = false;
1191  unsigned DeadNodes = 0;
1192  unsigned Noops = 0;
1193  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1194    if (!SUnits[i].isScheduled) {
1195      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1196        ++DeadNodes;
1197        continue;
1198      }
1199      if (!AnyNotSched)
1200        cerr << "*** List scheduling failed! ***\n";
1201      SUnits[i].dump(&DAG);
1202      cerr << "has not been scheduled!\n";
1203      AnyNotSched = true;
1204    }
1205    if (SUnits[i].NumPredsLeft != 0) {
1206      if (!AnyNotSched)
1207        cerr << "*** List scheduling failed! ***\n";
1208      SUnits[i].dump(&DAG);
1209      cerr << "has predecessors left!\n";
1210      AnyNotSched = true;
1211    }
1212  }
1213  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1214    if (!Sequence[i])
1215      ++Noops;
1216  assert(!AnyNotSched);
1217  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1218         "The number of nodes scheduled doesn't match the expected number!");
1219#endif
1220}
1221
1222
1223
1224//===----------------------------------------------------------------------===//
1225//                RegReductionPriorityQueue Implementation
1226//===----------------------------------------------------------------------===//
1227//
1228// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1229// to reduce register pressure.
1230//
1231namespace {
1232  template<class SF>
1233  class RegReductionPriorityQueue;
1234
1235  /// Sorting functions for the Available queue.
1236  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1237    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
1238    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
1239    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1240
1241    bool operator()(const SUnit* left, const SUnit* right) const;
1242  };
1243
1244  struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
1245    RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
1246    bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
1247      : SPQ(spq) {}
1248    bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}
1249
1250    bool operator()(const SUnit* left, const SUnit* right) const;
1251  };
1252
1253  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1254    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
1255    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
1256    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1257
1258    bool operator()(const SUnit* left, const SUnit* right) const;
1259  };
1260}  // end anonymous namespace
1261
1262static inline bool isCopyFromLiveIn(const SUnit *SU) {
1263  SDNode *N = SU->Node;
1264  return N && N->getOpcode() == ISD::CopyFromReg &&
1265    N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1266}
1267
1268/// CalcNodeBUSethiUllmanNumber - Compute Sethi Ullman number for bottom up
1269/// scheduling. Smaller number is the higher priority.
1270static unsigned
1271CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1272  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1273  if (SethiUllmanNumber != 0)
1274    return SethiUllmanNumber;
1275
1276  unsigned Extra = 0;
1277  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1278       I != E; ++I) {
1279    if (I->isCtrl) continue;  // ignore chain preds
1280    SUnit *PredSU = I->Dep;
1281    unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
1282    if (PredSethiUllman > SethiUllmanNumber) {
1283      SethiUllmanNumber = PredSethiUllman;
1284      Extra = 0;
1285    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1286      ++Extra;
1287  }
1288
1289  SethiUllmanNumber += Extra;
1290
1291  if (SethiUllmanNumber == 0)
1292    SethiUllmanNumber = 1;
1293
1294  return SethiUllmanNumber;
1295}
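
// Illustrative trace (not code): a leaf SUnit with no data predecessors keeps
// the accumulated value 0 and is bumped to 1. For "t = add a, b" with two
// leaf operands, the first data pred sets the running maximum to 1 and the
// second ties it, adding one Extra, so the add gets 2. A node whose operands
// have numbers 2 and 1 keeps the maximum (2); only ties raise the number,
// mirroring the classic Sethi-Ullman observation that an unbalanced subtree
// does not require more registers than its deeper child.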
1296
1297/// CalcNodeTDSethiUllmanNumber - Compute Sethi Ullman number for top down
1298/// scheduling. Smaller number is the higher priority.
1299static unsigned
1300CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1301  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1302  if (SethiUllmanNumber != 0)
1303    return SethiUllmanNumber;
1304
1305  unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1306  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1307    SethiUllmanNumber = 0xffff;
1308  else if (SU->NumSuccsLeft == 0)
1309    // If SU does not have a use, i.e. it doesn't produce a value that would
1310    // be consumed (e.g. store), then it terminates a chain of computation.
1311    // Give it a small SethiUllman number so it will be scheduled right before
1312    // its predecessors, so that it doesn't lengthen their live ranges.
1313    SethiUllmanNumber = 0;
1314  else if (SU->NumPredsLeft == 0 &&
1315           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1316    SethiUllmanNumber = 0xffff;
1317  else {
1318    int Extra = 0;
1319    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1320         I != E; ++I) {
1321      if (I->isCtrl) continue;  // ignore chain preds
1322      SUnit *PredSU = I->Dep;
1323      unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
1324      if (PredSethiUllman > SethiUllmanNumber) {
1325        SethiUllmanNumber = PredSethiUllman;
1326        Extra = 0;
1327      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1328        ++Extra;
1329    }
1330
1331    SethiUllmanNumber += Extra;
1332  }
1333
1334  return SethiUllmanNumber;
1335}
1336
1337
1338namespace {
1339  template<class SF>
1340  class VISIBILITY_HIDDEN RegReductionPriorityQueue
1341   : public SchedulingPriorityQueue {
1342    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
1343    unsigned currentQueueId;
1344
1345  public:
1346    RegReductionPriorityQueue() :
1347    Queue(SF(this)), currentQueueId(0) {}
1348
1349    virtual void initNodes(std::vector<SUnit> &sunits) {}
1350
1351    virtual void addNode(const SUnit *SU) {}
1352
1353    virtual void updateNode(const SUnit *SU) {}
1354
1355    virtual void releaseState() {}
1356
1357    virtual unsigned getNodePriority(const SUnit *SU) const {
1358      return 0;
1359    }
1360
1361    unsigned size() const { return Queue.size(); }
1362
1363    bool empty() const { return Queue.empty(); }
1364
1365    void push(SUnit *U) {
1366      assert(!U->NodeQueueId && "Node in the queue already");
1367      U->NodeQueueId = ++currentQueueId;
1368      Queue.push(U);
1369    }
1370
1371    void push_all(const std::vector<SUnit *> &Nodes) {
1372      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
1373        push(Nodes[i]);
1374    }
1375
1376    SUnit *pop() {
1377      if (empty()) return NULL;
1378      SUnit *V = Queue.top();
1379      Queue.pop();
1380      V->NodeQueueId = 0;
1381      return V;
1382    }
1383
1384    void remove(SUnit *SU) {
1385      assert(!Queue.empty() && "Queue is empty!");
1386      assert(SU->NodeQueueId != 0 && "Not in queue!");
1387      Queue.erase_one(SU);
1388      SU->NodeQueueId = 0;
1389    }
1390  };
1391
1392  class VISIBILITY_HIDDEN BURegReductionPriorityQueue
1393   : public RegReductionPriorityQueue<bu_ls_rr_sort> {
1394    // SUnits - The SUnits for the current graph.
1395    std::vector<SUnit> *SUnits;
1396
1397    // SethiUllmanNumbers - The SethiUllman number for each node.
1398    std::vector<unsigned> SethiUllmanNumbers;
1399
1400    const TargetInstrInfo *TII;
1401    const TargetRegisterInfo *TRI;
1402    ScheduleDAGRRList *scheduleDAG;
1403
1404  public:
1405    explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
1406                                         const TargetRegisterInfo *tri)
1407      : TII(tii), TRI(tri), scheduleDAG(NULL) {}
1408
1409    void initNodes(std::vector<SUnit> &sunits) {
1410      SUnits = &sunits;
1411      // Add pseudo dependency edges for two-address nodes.
1412      AddPseudoTwoAddrDeps();
1413      // Calculate node priorities.
1414      CalculateSethiUllmanNumbers();
1415    }
1416
1417    void addNode(const SUnit *SU) {
1418      unsigned SUSize = SethiUllmanNumbers.size();
1419      if (SUnits->size() > SUSize)
1420        SethiUllmanNumbers.resize(SUSize*2, 0);
1421      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1422    }
1423
1424    void updateNode(const SUnit *SU) {
1425      SethiUllmanNumbers[SU->NodeNum] = 0;
1426      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1427    }
1428
1429    void releaseState() {
1430      SUnits = 0;
1431      SethiUllmanNumbers.clear();
1432    }
1433
1434    unsigned getNodePriority(const SUnit *SU) const {
1435      assert(SU->NodeNum < SethiUllmanNumbers.size());
1436      unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1437      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
1438        // CopyFromReg should be close to its def because it restricts
1439        // allocation choices. But if it is a livein then perhaps we want it
1440        // closer to its uses so it can be coalesced.
1441        return 0xffff;
1442      else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1443        // CopyToReg should be close to its uses to facilitate coalescing and
1444        // avoid spilling.
1445        return 0;
1446      else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
1447               Opc == TargetInstrInfo::INSERT_SUBREG)
1448        // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
1449        // facilitate coalescing.
1450        return 0;
1451      else if (SU->NumSuccs == 0)
1452        // If SU does not have a use, i.e. it doesn't produce a value that would
1453        // be consumed (e.g. store), then it terminates a chain of computation.
1454        // Give it a large SethiUllman number so it will be scheduled right
1455        // before its predecessors, so that it doesn't lengthen their live ranges.
1456        return 0xffff;
1457      else if (SU->NumPreds == 0)
1458        // If SU does not have a def, schedule it close to its uses because it
1459        // does not lengthen any live ranges.
1460        return 0;
1461      else
1462        return SethiUllmanNumbers[SU->NodeNum];
1463    }
1464
1465    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1466      scheduleDAG = scheduleDag;
1467    }
1468
1469  private:
1470    bool canClobber(const SUnit *SU, const SUnit *Op);
1471    void AddPseudoTwoAddrDeps();
1472    void CalculateSethiUllmanNumbers();
1473  };
1474
1475
1476  class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
1477   : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
1478    // SUnits - The SUnits for the current graph.
1479    const std::vector<SUnit> *SUnits;
1480
1481    // SethiUllmanNumbers - The SethiUllman number for each node.
1482    std::vector<unsigned> SethiUllmanNumbers;
1483  public:
1484    explicit BURegReductionFastPriorityQueue() {}
1485
1486    void initNodes(std::vector<SUnit> &sunits) {
1487      SUnits = &sunits;
1488      // Calculate node priorities.
1489      CalculateSethiUllmanNumbers();
1490    }
1491
1492    void addNode(const SUnit *SU) {
1493      unsigned SUSize = SethiUllmanNumbers.size();
1494      if (SUnits->size() > SUSize)
1495        SethiUllmanNumbers.resize(SUSize*2, 0);
1496      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1497    }
1498
1499    void updateNode(const SUnit *SU) {
1500      SethiUllmanNumbers[SU->NodeNum] = 0;
1501      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1502    }
1503
1504    void releaseState() {
1505      SUnits = 0;
1506      SethiUllmanNumbers.clear();
1507    }
1508
1509    unsigned getNodePriority(const SUnit *SU) const {
1510      return SethiUllmanNumbers[SU->NodeNum];
1511    }
1512
1513  private:
1514    void CalculateSethiUllmanNumbers();
1515  };
1516
1517
1518  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
1519   : public RegReductionPriorityQueue<td_ls_rr_sort> {
1520    // SUnits - The SUnits for the current graph.
1521    const std::vector<SUnit> *SUnits;
1522
1523    // SethiUllmanNumbers - The SethiUllman number for each node.
1524    std::vector<unsigned> SethiUllmanNumbers;
1525
1526  public:
1527    TDRegReductionPriorityQueue() {}
1528
1529    void initNodes(std::vector<SUnit> &sunits) {
1530      SUnits = &sunits;
1531      // Calculate node priorities.
1532      CalculateSethiUllmanNumbers();
1533    }
1534
1535    void addNode(const SUnit *SU) {
1536      unsigned SUSize = SethiUllmanNumbers.size();
1537      if (SUnits->size() > SUSize)
1538        SethiUllmanNumbers.resize(SUSize*2, 0);
1539      CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
1540    }
1541
1542    void updateNode(const SUnit *SU) {
1543      SethiUllmanNumbers[SU->NodeNum] = 0;
1544      CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
1545    }
1546
1547    void releaseState() {
1548      SUnits = 0;
1549      SethiUllmanNumbers.clear();
1550    }
1551
1552    unsigned getNodePriority(const SUnit *SU) const {
1553      assert(SU->NodeNum < SethiUllmanNumbers.size());
1554      return SethiUllmanNumbers[SU->NodeNum];
1555    }
1556
1557  private:
1558    void CalculateSethiUllmanNumbers();
1559  };
1560}
1561
1562/// closestSucc - Returns the scheduled cycle of the successor which is
1563/// closest to the current cycle.
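    /// For example, if SU's value is used by successors already scheduled at
    /// cycles 3 and 7, this returns 7: in this bottom-up scheme the largest
    /// successor cycle is the one nearest to the current cycle.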
1564static unsigned closestSucc(const SUnit *SU) {
1565  unsigned MaxCycle = 0;
1566  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1567       I != E; ++I) {
1568    unsigned Cycle = I->Dep->Cycle;
1569    // If there are a bunch of CopyToRegs stacked up, they should be considered
1570    // to be at the same position.
1571    if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
1572      Cycle = closestSucc(I->Dep)+1;
1573    if (Cycle > MaxCycle)
1574      MaxCycle = Cycle;
1575  }
1576  return MaxCycle;
1577}
1578
1579/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
1580/// for scratch registers. Live-in operands and live-out results don't count
1581/// since they are "fixed".
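    /// As a heuristic, each operand not produced by a CopyFromReg adds 1, while
    /// each result not consumed by a CopyToReg adds 10, so results that stay
    /// live inside the block dominate the estimate.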
1582static unsigned calcMaxScratches(const SUnit *SU) {
1583  unsigned Scratches = 0;
1584  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1585       I != E; ++I) {
1586    if (I->isCtrl) continue;  // ignore chain preds
1587    if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
1588      Scratches++;
1589  }
1590  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1591       I != E; ++I) {
1592    if (I->isCtrl) continue;  // ignore chain succs
1593    if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
1594      Scratches += 10;
1595  }
1596  return Scratches;
1597}
1598
1599// Bottom up
1600bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1601  unsigned LPriority = SPQ->getNodePriority(left);
1602  unsigned RPriority = SPQ->getNodePriority(right);
1603  if (LPriority != RPriority)
1604    return LPriority > RPriority;
1605
1606  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
1607  // e.g.
1608  // t1 = op t2, c1
1609  // t3 = op t4, c2
1610  //
1611  // and the following instructions are both ready.
1612  // t2 = op c3
1613  // t4 = op c4
1614  //
1615  // Then schedule t2 = op first.
1616  // i.e.
1617  // t4 = op c4
1618  // t2 = op c3
1619  // t1 = op t2, c1
1620  // t3 = op t4, c2
1621  //
1622  // This creates more short live intervals.
1623  unsigned LDist = closestSucc(left);
1624  unsigned RDist = closestSucc(right);
1625  if (LDist != RDist)
1626    return LDist < RDist;
1627
1628  // Intuitively, it's good to push down instructions whose results are
1629  // liveout so their long live ranges won't conflict with other values
1630  // which are needed inside the BB. Further prioritize liveout instructions
1631  // by the number of operands which are calculated within the BB.
1632  unsigned LScratch = calcMaxScratches(left);
1633  unsigned RScratch = calcMaxScratches(right);
1634  if (LScratch != RScratch)
1635    return LScratch > RScratch;
1636
1637  if (left->Height != right->Height)
1638    return left->Height > right->Height;
1639
1640  if (left->Depth != right->Depth)
1641    return left->Depth < right->Depth;
1642
1643  if (left->CycleBound != right->CycleBound)
1644    return left->CycleBound > right->CycleBound;
1645
1646  assert(left->NodeQueueId && right->NodeQueueId &&
1647         "NodeQueueId cannot be zero");
1648  return (left->NodeQueueId > right->NodeQueueId);
1649}
1650
1651bool
1652bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
1653  unsigned LPriority = SPQ->getNodePriority(left);
1654  unsigned RPriority = SPQ->getNodePriority(right);
1655  if (LPriority != RPriority)
1656    return LPriority > RPriority;
1657  assert(left->NodeQueueId && right->NodeQueueId &&
1658         "NodeQueueId cannot be zero");
1659  return (left->NodeQueueId > right->NodeQueueId);
1660}
1661
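    /// canClobber - Return true if SU is a two-address instruction and the value
    /// defined by Op is used as one of SU's tied (def&use) operands, i.e. SU
    /// would overwrite the register holding Op's result.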
1662bool
1663BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
1664  if (SU->isTwoAddress) {
1665    unsigned Opc = SU->Node->getMachineOpcode();
1666    const TargetInstrDesc &TID = TII->get(Opc);
1667    unsigned NumRes = TID.getNumDefs();
1668    unsigned NumOps = TID.getNumOperands() - NumRes;
1669    for (unsigned i = 0; i != NumOps; ++i) {
1670      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
1671        SDNode *DU = SU->Node->getOperand(i).getNode();
1672        if (DU->getNodeId() != -1 &&
1673            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
1674          return true;
1675      }
1676    }
1677  }
1678  return false;
1679}
1680
1681
1682/// hasCopyToRegUse - Return true if SU has a value successor that is a
1683/// CopyToReg node.
1684static bool hasCopyToRegUse(const SUnit *SU) {
1685  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1686       I != E; ++I) {
1687    if (I->isCtrl) continue;
1688    const SUnit *SuccSU = I->Dep;
1689    if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
1690      return true;
1691  }
1692  return false;
1693}
1694
1695/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
1696/// physical register defs.
1697static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
1698                                  const TargetInstrInfo *TII,
1699                                  const TargetRegisterInfo *TRI) {
1700  SDNode *N = SuccSU->Node;
1701  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1702  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
1703  assert(ImpDefs && "Caller should check hasPhysRegDefs");
1704  const unsigned *SUImpDefs =
1705    TII->get(SU->Node->getMachineOpcode()).getImplicitDefs();
1706  if (!SUImpDefs)
1707    return false;
1708  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
1709    MVT VT = N->getValueType(i);
1710    if (VT == MVT::Flag || VT == MVT::Other)
1711      continue;
1712    if (!N->hasAnyUseOfValue(i))
1713      continue;
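        // Value results past the explicit defs are assumed to correspond, in
        // order, to the instruction's implicit physical register defs.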
1714    unsigned Reg = ImpDefs[i - NumDefs];
1715    for (;*SUImpDefs; ++SUImpDefs) {
1716      unsigned SUReg = *SUImpDefs;
1717      if (TRI->regsOverlap(Reg, SUReg))
1718        return true;
1719    }
1720  }
1721  return false;
1722}
1723
1724/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1725/// it as a def&use operand, add a pseudo control edge from it to the other
1726/// node (if it won't create a cycle) so the two-address one will be scheduled
1727/// first (lower in the schedule). If both nodes are two-address, favor the
1728/// one that has a CopyToReg use (more likely to be a loop induction update).
1729/// If both are two-address, but one is commutable while the other is not
1730/// commutable, favor the one that's not commutable.
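    /// As an illustration (hypothetical names), suppose
    ///   a = opA x, ...    ;; two-address: a is assigned x's register
    ///   b = opB x, ...
    /// The added control edge keeps opB above opA in the final bottom-up order,
    /// so opB reads x before opA overwrites the register holding it.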
1731void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
1732  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1733    SUnit *SU = &(*SUnits)[i];
1734    if (!SU->isTwoAddress)
1735      continue;
1736
1737    SDNode *Node = SU->Node;
1738    if (!Node || !Node->isMachineOpcode() || SU->FlaggedNodes.size() > 0)
1739      continue;
1740
1741    unsigned Opc = Node->getMachineOpcode();
1742    const TargetInstrDesc &TID = TII->get(Opc);
1743    unsigned NumRes = TID.getNumDefs();
1744    unsigned NumOps = TID.getNumOperands() - NumRes;
1745    for (unsigned j = 0; j != NumOps; ++j) {
1746      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
1747        SDNode *DU = SU->Node->getOperand(j).getNode();
1748        if (DU->getNodeId() == -1)
1749          continue;
1750        const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
1751        if (!DUSU) continue;
1752        for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
1753             E = DUSU->Succs.end(); I != E; ++I) {
1754          if (I->isCtrl) continue;
1755          SUnit *SuccSU = I->Dep;
1756          if (SuccSU == SU)
1757            continue;
1758          // Be conservative. Ignore if the nodes aren't at roughly the same
1759          // height.
1760          if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
1761            continue;
1762          if (!SuccSU->Node || !SuccSU->Node->isMachineOpcode())
1763            continue;
1764          // Don't constrain nodes with physical register defs if the
1765          // predecessor can clobber them.
1766          if (SuccSU->hasPhysRegDefs) {
1767            if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
1768              continue;
1769          }
1770          // Don't constrain extract_subreg / insert_subreg; these may be
1771          // coalesced away. We don't want them close to their uses.
1772          unsigned SuccOpc = SuccSU->Node->getMachineOpcode();
1773          if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
1774              SuccOpc == TargetInstrInfo::INSERT_SUBREG)
1775            continue;
1776          if ((!canClobber(SuccSU, DUSU) ||
1777               (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
1778               (!SU->isCommutable && SuccSU->isCommutable)) &&
1779              !scheduleDAG->IsReachable(SuccSU, SU)) {
1780            DOUT << "Adding an edge from SU #" << SU->NodeNum
1781                 << " to SU #" << SuccSU->NodeNum << "\n";
1782            scheduleDAG->AddPred(SU, SuccSU, true, true);
1783          }
1784        }
1785      }
1786    }
1787  }
1788}
1789
1790/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1791/// scheduling units.
1792void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1793  SethiUllmanNumbers.assign(SUnits->size(), 0);
1794
1795  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1796    CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1797}
1798void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
1799  SethiUllmanNumbers.assign(SUnits->size(), 0);
1800
1801  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1802    CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1803}
1804
1805/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the total number of unscheduled
1806/// predecessors of the successors of the SUnit SU. Stop counting as soon as the
1807/// provided limit is exceeded.
1808static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
1809                                                    unsigned Limit) {
1810  unsigned Sum = 0;
1811  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1812       I != E; ++I) {
1813    const SUnit *SuccSU = I->Dep;
1814    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1815         EE = SuccSU->Preds.end(); II != EE; ++II) {
1816      SUnit *PredSU = II->Dep;
1817      if (!PredSU->isScheduled)
1818        if (++Sum > Limit)
1819          return Sum;
1820    }
1821  }
1822  return Sum;
1823}
1824
1825
1826// Top down
1827bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1828  unsigned LPriority = SPQ->getNodePriority(left);
1829  unsigned RPriority = SPQ->getNodePriority(right);
1830  bool LIsTarget = left->Node && left->Node->isMachineOpcode();
1831  bool RIsTarget = right->Node && right->Node->isMachineOpcode();
1832  bool LIsFloater = LIsTarget && left->NumPreds == 0;
1833  bool RIsFloater = RIsTarget && right->NumPreds == 0;
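      // A count of exactly 1 means the node has a single successor and is that
      // successor's only unscheduled predecessor; such nodes get a bonus of 2 in
      // the priority comparison below.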
1834  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
1835  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
1836
1837  if (left->NumSuccs == 0 && right->NumSuccs != 0)
1838    return false;
1839  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1840    return true;
1841
1842  if (LIsFloater)
1843    LBonus -= 2;
1844  if (RIsFloater)
1845    RBonus -= 2;
1846  if (left->NumSuccs == 1)
1847    LBonus += 2;
1848  if (right->NumSuccs == 1)
1849    RBonus += 2;
1850
1851  if (LPriority+LBonus != RPriority+RBonus)
1852    return LPriority+LBonus < RPriority+RBonus;
1853
1854  if (left->Depth != right->Depth)
1855    return left->Depth < right->Depth;
1856
1857  if (left->NumSuccsLeft != right->NumSuccsLeft)
1858    return left->NumSuccsLeft > right->NumSuccsLeft;
1859
1860  if (left->CycleBound != right->CycleBound)
1861    return left->CycleBound > right->CycleBound;
1862
1863  assert(left->NodeQueueId && right->NodeQueueId &&
1864         "NodeQueueId cannot be zero");
1865  return (left->NodeQueueId > right->NodeQueueId);
1866}
1867
1868/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1869/// scheduling units.
1870void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1871  SethiUllmanNumbers.assign(SUnits->size(), 0);
1872
1873  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1874    CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1875}
1876
1877//===----------------------------------------------------------------------===//
1878//                         Public Constructor Functions
1879//===----------------------------------------------------------------------===//
1880
1881llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1882                                                    SelectionDAG *DAG,
1883                                                    MachineBasicBlock *BB,
1884                                                    bool Fast) {
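      // In fast mode, use the cheaper queue: it takes raw Sethi-Ullman numbers as
      // priorities, needs no target info, and skips the extra two-address
      // adjustments that the full queue performs.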
1885  if (Fast)
1886    return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, true,
1887                                 new BURegReductionFastPriorityQueue());
1888
1889  const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
1890  const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();
1891
1892  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
1893
1894  ScheduleDAGRRList *SD =
1895    new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(),true,false, PQ);
1896  PQ->setScheduleDAG(SD);
1897  return SD;
1898}
1899
1900llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1901                                                    SelectionDAG *DAG,
1902                                                    MachineBasicBlock *BB,
1903                                                    bool Fast) {
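      // The top-down queue needs no target info and no back-pointer to the
      // scheduler, so fast and normal modes share the same queue here.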
1904  return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, Fast,
1905                               new TDRegReductionPriorityQueue());
1906}
1907