ScheduleDAGRRList.cpp revision dd21ce80415edf09fa66a0bfebc812ce682409bf
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a bottom-up register pressure reduction list scheduler,
// using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

#ifndef NDEBUG
namespace {
  // For sched=list-ilp, count the number of times each factor comes into play.
  enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
         FactStatic, FactOther, NumFactors };
}
static const char *FactorName[NumFactors] =
{"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
#endif //!NDEBUG

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation).  Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count of instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
}  // end anonymous namespace
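
// A minimal sketch of how this scheduler is typically instantiated (the
// factory functions registered above follow this pattern; the concrete
// queue type, e.g. a register-reduction priority queue, is defined later
// in this file):
//
//   SchedulingPriorityQueue *PQ = ...; // e.g. the list-burr queue
//   ScheduleDAGRRList *SD =
//     new ScheduleDAGRRList(MF, /*needlatency=*/false, PQ, OptLevel);
//
// ScheduleDAGRRList takes ownership of the queue and of the hazard
// recognizer it creates, deleting both in its destructor.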

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values.  These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    // Take the descriptor by reference; there is no need to copy it.
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}
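
// Illustrative use, a sketch modeled on how register defs are walked with
// ScheduleDAGSDNodes::RegDefIter elsewhere in this file (names here are
// placeholders):
//
//   for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, ScheduleDAG);
//        RegDefPos.IsValid(); RegDefPos.Advance()) {
//     unsigned RCId, Cost;
//     GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
//     // RCId and Cost can then feed register-pressure accounting.
//   }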

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");
#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }
#endif //!NDEBUG

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
#endif // !NDEBUG
  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Update the predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}
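
// To make the bookkeeping concrete, here is an illustrative nested call
// sequence, annotated with NestLevel as the walk climbs the chain upward
// starting from the outer CALLSEQ_END:
//
//   CALLSEQ_BEGIN   (outer)  NestLevel 1 -> 0: match found, N is returned
//     CALLSEQ_BEGIN (inner)  NestLevel 2 -> 1
//     CALLSEQ_END   (inner)  NestLevel 1 -> 2, MaxNest becomes 2
//   CALLSEQ_END     (outer)  NestLevel 0 -> 1: the walk starts here
//
// CALLSEQ_END increments NestLevel, CALLSEQ_BEGIN decrements it, and the
// matching CALLSEQ_BEGIN is the one that brings NestLevel back to zero.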

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue.  If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue.  If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
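    // Remove the entry by swapping it with the back of the vector and
    // popping, then re-examine the swapped-in element at the same index.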
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  } else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// state and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = NULL;
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // Count the register as live only if it wasn't already, checking
      // before the assignment below (checking afterwards would never fire).
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  } else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->UnscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
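    // Recede the hazard state one cycle at a time until it reaches the
    // cycle in which this node was scheduled, then replay the node.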
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to an earlier cycle by
/// unscheduling nodes from the end of the sequence back to and including
/// BtSU, in order to make it possible to schedule SU.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    // Assign to the NewSU declared above rather than shadowing it with a
    // new local; the unfold path below relies on the same variable.
    NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                               const TargetRegisterClass *DestRC,
                                               const TargetRegisterClass *SrcRC,
                                               SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    } else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
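
// For illustration (a hypothetical example, not tied to any real target): if
// a machine node has two explicit defs followed by an implicit-def list of
// { FLAGS, ACC }, then asking for ACC yields the node's value type at result
// index 3 (the 2 explicit defs, plus 1 to skip past FLAGS).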

/// CheckForLiveRegDef - Record in LRegs any "live" registers that would be
/// clobbered by the specified register def of the specified SUnit.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

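      // Each inline-asm operand group begins with a flag word that encodes
      // the operand kind and the number of register operands that follow.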
      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}
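
// As a concrete (hypothetical) example: on a target with a FLAGS register, a
// candidate node that implicitly defines FLAGS while a different,
// not-yet-scheduled node is recorded as the live definition of FLAGS must be
// delayed, and FLAGS would be reported back through LRegs.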

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      } else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}
1416
1417/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
1418/// schedulers.
1419void ScheduleDAGRRList::ListScheduleBottomUp() {
1420  // Release any predecessors of the special Exit node.
1421  ReleasePredecessors(&ExitSU);
1422
1423  // Add root to Available queue.
1424  if (!SUnits.empty()) {
1425    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1426    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1427    RootSU->isAvailable = true;
1428    AvailableQueue->push(RootSU);
1429  }
1430
1431  // While Available queue is not empty, grab the node with the highest
1432  // priority. If it is not ready put it back.  Schedule the node.
1433  Sequence.reserve(SUnits.size());
1434  while (!AvailableQueue->empty()) {
1435    DEBUG(dbgs() << "\nExamining Available:\n";
1436          AvailableQueue->dump(this));
1437
1438    // Pick the best node to schedule taking all constraints into
1439    // consideration.
1440    SUnit *SU = PickNodeToScheduleBottomUp();
1441
1442    AdvancePastStalls(SU);
1443
1444    ScheduleNodeBottomUp(SU);
1445
1446    while (AvailableQueue->empty() && !PendingQueue.empty()) {
1447      // Advance the cycle to free resources. Skip ahead to the next ready SU.
1448      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1449      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1450    }
1451  }
1452
1453  // Reverse the order since this is a bottom-up scheduler.
1454  std::reverse(Sequence.begin(), Sequence.end());
1455
1456#ifndef NDEBUG
1457  VerifySchedule(/*isBottomUp=*/true);
1458#endif
1459}
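
// For illustration (a hypothetical trace, not taken from this file): a
// bottom-up pick order of [store, add, load] fills Sequence back-to-front,
// so the std::reverse above yields the final program order [load, add,
// store].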
1460
1461//===----------------------------------------------------------------------===//
1462//                RegReductionPriorityQueue Definition
1463//===----------------------------------------------------------------------===//
1464//
1465// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
1466// to reduce register pressure.
1467//
1468namespace {
1469class RegReductionPQBase;
1470
1471struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1472  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1473};
1474
1475#ifndef NDEBUG
1476template<class SF>
1477struct reverse_sort : public queue_sort {
1478  SF &SortFunc;
1479  reverse_sort(SF &sf) : SortFunc(sf) {}
1480  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}
1481
1482  bool operator()(SUnit* left, SUnit* right) const {
1483    // reverse left/right rather than simply !SortFunc(left, right)
1484    // to expose different paths in the comparison logic.
1485    return SortFunc(right, left);
1486  }
1487};
1488#endif // NDEBUG
1489
1490/// bu_ls_rr_sort - Priority function for the bottom-up register pressure
1491/// reduction scheduler.
1492struct bu_ls_rr_sort : public queue_sort {
1493  enum {
1494    IsBottomUp = true,
1495    HasReadyFilter = false
1496  };
1497
1498  RegReductionPQBase *SPQ;
1499  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1500  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1501
1502  bool operator()(SUnit* left, SUnit* right) const;
1503};
1504
1505// src_ls_rr_sort - Priority function for source order scheduler.
1506struct src_ls_rr_sort : public queue_sort {
1507  enum {
1508    IsBottomUp = true,
1509    HasReadyFilter = false
1510  };
1511
1512  RegReductionPQBase *SPQ;
1513  src_ls_rr_sort(RegReductionPQBase *spq)
1514    : SPQ(spq) {}
1515  src_ls_rr_sort(const src_ls_rr_sort &RHS)
1516    : SPQ(RHS.SPQ) {}
1517
1518  bool operator()(SUnit* left, SUnit* right) const;
1519};
1520
1521// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1522struct hybrid_ls_rr_sort : public queue_sort {
1523  enum {
1524    IsBottomUp = true,
1525    HasReadyFilter = false
1526  };
1527
1528  RegReductionPQBase *SPQ;
1529  hybrid_ls_rr_sort(RegReductionPQBase *spq)
1530    : SPQ(spq) {}
1531  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
1532    : SPQ(RHS.SPQ) {}
1533
1534  bool isReady(SUnit *SU, unsigned CurCycle) const;
1535
1536  bool operator()(SUnit* left, SUnit* right) const;
1537};
1538
1539// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1540// scheduler.
1541struct ilp_ls_rr_sort : public queue_sort {
1542  enum {
1543    IsBottomUp = true,
1544    HasReadyFilter = false
1545  };
1546
1547  RegReductionPQBase *SPQ;
1548  ilp_ls_rr_sort(RegReductionPQBase *spq)
1549    : SPQ(spq) {}
1550  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
1551    : SPQ(RHS.SPQ) {}
1552
1553  bool isReady(SUnit *SU, unsigned CurCycle) const;
1554
1555  bool operator()(SUnit* left, SUnit* right) const;
1556};
1557
1558class RegReductionPQBase : public SchedulingPriorityQueue {
1559protected:
1560  std::vector<SUnit*> Queue;
1561  unsigned CurQueueId;
1562  bool TracksRegPressure;
1563
1564  // SUnits - The SUnits for the current graph.
1565  std::vector<SUnit> *SUnits;
1566
1567  MachineFunction &MF;
1568  const TargetInstrInfo *TII;
1569  const TargetRegisterInfo *TRI;
1570  const TargetLowering *TLI;
1571  ScheduleDAGRRList *scheduleDAG;
1572
1573  // SethiUllmanNumbers - The SethiUllman number for each node.
1574  std::vector<unsigned> SethiUllmanNumbers;
1575
1576  /// RegPressure - Tracking current reg pressure per register class.
1577  ///
1578  std::vector<unsigned> RegPressure;
1579
1580  /// RegLimit - Tracking the number of allocatable registers per register
1581  /// class.
1582  std::vector<unsigned> RegLimit;
1583
1584public:
1585  RegReductionPQBase(MachineFunction &mf,
1586                     bool hasReadyFilter,
1587                     bool tracksrp,
1588                     const TargetInstrInfo *tii,
1589                     const TargetRegisterInfo *tri,
1590                     const TargetLowering *tli)
1591    : SchedulingPriorityQueue(hasReadyFilter),
1592      CurQueueId(0), TracksRegPressure(tracksrp),
1593      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
1594    if (TracksRegPressure) {
1595      unsigned NumRC = TRI->getNumRegClasses();
1596      RegLimit.resize(NumRC);
1597      RegPressure.resize(NumRC);
1598      std::fill(RegLimit.begin(), RegLimit.end(), 0);
1599      std::fill(RegPressure.begin(), RegPressure.end(), 0);
1600      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1601             E = TRI->regclass_end(); I != E; ++I)
1602        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
1603    }
1604  }
1605
1606  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1607    scheduleDAG = scheduleDag;
1608  }
1609
1610  ScheduleHazardRecognizer* getHazardRec() {
1611    return scheduleDAG->getHazardRec();
1612  }
1613
1614  void initNodes(std::vector<SUnit> &sunits);
1615
1616  void addNode(const SUnit *SU);
1617
1618  void updateNode(const SUnit *SU);
1619
1620  void releaseState() {
1621    SUnits = 0;
1622    SethiUllmanNumbers.clear();
1623    std::fill(RegPressure.begin(), RegPressure.end(), 0);
1624  }
1625
1626  unsigned getNodePriority(const SUnit *SU) const;
1627
1628  unsigned getNodeOrdering(const SUnit *SU) const {
1629    if (!SU->getNode()) return 0;
1630
1631    return scheduleDAG->DAG->GetOrdering(SU->getNode());
1632  }
1633
1634  bool empty() const { return Queue.empty(); }
1635
1636  void push(SUnit *U) {
1637    assert(!U->NodeQueueId && "Node in the queue already");
1638    U->NodeQueueId = ++CurQueueId;
1639    Queue.push_back(U);
1640  }
1641
1642  void remove(SUnit *SU) {
1643    assert(!Queue.empty() && "Queue is empty!");
1644    assert(SU->NodeQueueId != 0 && "Not in queue!");
1645    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1646                                                 SU);
1647    if (I != prior(Queue.end()))
1648      std::swap(*I, Queue.back());
1649    Queue.pop_back();
1650    SU->NodeQueueId = 0;
1651  }
1652
1653  bool tracksRegPressure() const { return TracksRegPressure; }
1654
1655  void dumpRegPressure() const;
1656
1657  bool HighRegPressure(const SUnit *SU) const;
1658
1659  bool MayReduceRegPressure(SUnit *SU) const;
1660
1661  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1662
1663  void ScheduledNode(SUnit *SU);
1664
1665  void UnscheduledNode(SUnit *SU);
1666
1667protected:
1668  bool canClobber(const SUnit *SU, const SUnit *Op);
1669  void AddPseudoTwoAddrDeps();
1670  void PrescheduleNodesWithMultipleUses();
1671  void CalculateSethiUllmanNumbers();
1672};
1673
1674template<class SF>
1675static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
1676  std::vector<SUnit *>::iterator Best = Q.begin();
1677  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
1678         E = Q.end(); I != E; ++I)
1679    if (Picker(*Best, *I))
1680      Best = I;
1681  SUnit *V = *Best;
1682  if (Best != prior(Q.end()))
1683    std::swap(*Best, Q.back());
1684  Q.pop_back();
1685  return V;
1686}
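
// Note the convention relied on here: Picker(A, B) returns true when B should
// be scheduled with higher priority than A, so Best only advances to a
// strictly preferable candidate and the scan is a single O(n) pass over Q.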
1687
1688template<class SF>
1689SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
1690#ifndef NDEBUG
1691  if (DAG->StressSched) {
1692    reverse_sort<SF> RPicker(Picker);
1693    return popFromQueueImpl(Q, RPicker);
1694  }
1695#endif
1696  (void)DAG;
1697  return popFromQueueImpl(Q, Picker);
1698}
1699
1700template<class SF>
1701class RegReductionPriorityQueue : public RegReductionPQBase {
1702  SF Picker;
1703
1704public:
1705  RegReductionPriorityQueue(MachineFunction &mf,
1706                            bool tracksrp,
1707                            const TargetInstrInfo *tii,
1708                            const TargetRegisterInfo *tri,
1709                            const TargetLowering *tli)
1710    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
1711      Picker(this) {}
1712
1713  bool isBottomUp() const { return SF::IsBottomUp; }
1714
1715  bool isReady(SUnit *U) const {
1716    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1717  }
1718
1719  SUnit *pop() {
1720    if (Queue.empty()) return NULL;
1721
1722    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
1723    V->NodeQueueId = 0;
1724    return V;
1725  }
1726
1727  void dump(ScheduleDAG *DAG) const {
1728    // Emulate pop() without clobbering NodeQueueIds.
1729    std::vector<SUnit*> DumpQueue = Queue;
1730    SF DumpPicker = Picker;
1731    while (!DumpQueue.empty()) {
1732      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
1733      dbgs() << "Height " << SU->getHeight() << ": ";
1734      SU->dump(DAG);
1735    }
1736  }
1737};
1738
1739typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1740BURegReductionPriorityQueue;
1741
1742typedef RegReductionPriorityQueue<src_ls_rr_sort>
1743SrcRegReductionPriorityQueue;
1744
1745typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1746HybridBURRPriorityQueue;
1747
1748typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1749ILPBURRPriorityQueue;
1750} // end anonymous namespace
1751
1752//===----------------------------------------------------------------------===//
1753//           Static Node Priority for Register Pressure Reduction
1754//===----------------------------------------------------------------------===//
1755
1756// Check for special nodes that bypass scheduling heuristics.
1757// Currently this pushes TokenFactor nodes down, but may be used for other
1758// pseudo-ops as well.
1759//
1760// Return -1 to schedule right above left, 1 for left above right.
1761// Return 0 if no bias exists.
1762static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1763  bool LSchedLow = left->isScheduleLow;
1764  bool RSchedLow = right->isScheduleLow;
1765  if (LSchedLow != RSchedLow)
1766    return LSchedLow < RSchedLow ? 1 : -1;
1767  return 0;
1768}
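
// For example (illustrative only): if left is a TokenFactor marked
// isScheduleLow and right is not, then LSchedLow > RSchedLow and the function
// returns -1, i.e. schedule right above left, pushing the TokenFactor down.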
1769
1770/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
1771/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
1772/// A smaller number means a higher priority.
1773CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1774  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1775  if (SethiUllmanNumber != 0)
1776    return SethiUllmanNumber;
1777
1778  unsigned Extra = 0;
1779  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1780       I != E; ++I) {
1781    if (I->isCtrl()) continue;  // ignore chain preds
1782    SUnit *PredSU = I->getSUnit();
1783    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1784    if (PredSethiUllman > SethiUllmanNumber) {
1785      SethiUllmanNumber = PredSethiUllman;
1786      Extra = 0;
1787    } else if (PredSethiUllman == SethiUllmanNumber)
1788      ++Extra;
1789  }
1790
1791  SethiUllmanNumber += Extra;
1792
1793  if (SethiUllmanNumber == 0)
1794    SethiUllmanNumber = 1;
1795
1796  return SethiUllmanNumber;
1797}
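
// A worked example of the recurrence above (hypothetical SUs, not from this
// file): a leaf with no data preds computes 0 and is clamped to 1. An SU
// whose two data preds both carry 1 sees one tie, so Extra becomes 1 and its
// number is 1 + 1 = 2; an SU with preds numbered 2 and 1 simply keeps 2.
// Only ties between operand labels consume an extra register, matching the
// classic Sethi-Ullman labeling.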
1798
1799/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1800/// scheduling units.
1801void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1802  SethiUllmanNumbers.assign(SUnits->size(), 0);
1803
1804  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1805    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1806}
1807
1808void RegReductionPQBase::addNode(const SUnit *SU) {
1809  unsigned SUSize = SethiUllmanNumbers.size();
1810  if (SUnits->size() > SUSize)
1811    SethiUllmanNumbers.resize(SUSize*2, 0);
1812  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1813}
1814
1815void RegReductionPQBase::updateNode(const SUnit *SU) {
1816  SethiUllmanNumbers[SU->NodeNum] = 0;
1817  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1818}
1819
1820// Lower priority means schedule further down. For bottom-up scheduling, lower
1821// priority SUs are scheduled before higher priority SUs.
1822unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1823  assert(SU->NodeNum < SethiUllmanNumbers.size());
1824  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1825  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1826    // CopyToReg should be close to its uses to facilitate coalescing and
1827    // avoid spilling.
1828    return 0;
1829  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1830      Opc == TargetOpcode::SUBREG_TO_REG ||
1831      Opc == TargetOpcode::INSERT_SUBREG)
1832    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1833    // close to their uses to facilitate coalescing.
1834    return 0;
1835  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1836    // If SU does not have a register use, i.e. it doesn't produce a value
1837    // that would be consumed (e.g. store), then it terminates a chain of
1838    // computation.  Give it a large SethiUllman number so it will be
1839    // scheduled right before its predecessors and won't lengthen their
1840    // live ranges.
1841    return 0xffff;
1842  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1843    // If SU does not have a register def, schedule it close to its uses
1844    // because it does not lengthen any live ranges.
1845    return 0;
1846#if 1
1847  return SethiUllmanNumbers[SU->NodeNum];
1848#else
1849  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1850  if (SU->isCallOp) {
1851    // FIXME: This assumes all of the defs are used as call operands.
1852    int NP = (int)Priority - SU->getNode()->getNumValues();
1853    return (NP > 0) ? NP : 0;
1854  }
1855  return Priority;
1856#endif
1857}
1858
1859//===----------------------------------------------------------------------===//
1860//                     Register Pressure Tracking
1861//===----------------------------------------------------------------------===//
1862
1863void RegReductionPQBase::dumpRegPressure() const {
1864  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1865         E = TRI->regclass_end(); I != E; ++I) {
1866    const TargetRegisterClass *RC = *I;
1867    unsigned Id = RC->getID();
1868    unsigned RP = RegPressure[Id];
1869    if (!RP) continue;
1870    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1871          << '\n');
1872  }
1873}
1874
1875bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1876  if (!TLI)
1877    return false;
1878
1879  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1880       I != E; ++I) {
1881    if (I->isCtrl())
1882      continue;
1883    SUnit *PredSU = I->getSUnit();
1884    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1885    // to cover the number of registers defined (they are all live).
1886    if (PredSU->NumRegDefsLeft == 0) {
1887      continue;
1888    }
1889    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1890         RegDefPos.IsValid(); RegDefPos.Advance()) {
1891      unsigned RCId, Cost;
1892      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1893
1894      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1895        return true;
1896    }
1897  }
1898  return false;
1899}
1900
1901bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1902  const SDNode *N = SU->getNode();
1903
1904  if (!N->isMachineOpcode() || !SU->NumSuccs)
1905    return false;
1906
1907  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1908  for (unsigned i = 0; i != NumDefs; ++i) {
1909    EVT VT = N->getValueType(i);
1910    if (!N->hasAnyUseOfValue(i))
1911      continue;
1912    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1913    if (RegPressure[RCId] >= RegLimit[RCId])
1914      return true;
1915  }
1916  return false;
1917}
1918
1919// Compute the register pressure contribution of this instruction by counting
1920// up for uses that are not live and down for defs. Only count register classes
1921// that are already under high pressure. As a side effect, compute the number of
1922// uses of registers that are already live.
1923//
1924// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1925// so could probably be factored.
1926int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1927  LiveUses = 0;
1928  int PDiff = 0;
1929  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1930       I != E; ++I) {
1931    if (I->isCtrl())
1932      continue;
1933    SUnit *PredSU = I->getSUnit();
1934    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1935    // to cover the number of registers defined (they are all live).
1936    if (PredSU->NumRegDefsLeft == 0) {
1937      if (PredSU->getNode()->isMachineOpcode())
1938        ++LiveUses;
1939      continue;
1940    }
1941    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1942         RegDefPos.IsValid(); RegDefPos.Advance()) {
1943      EVT VT = RegDefPos.GetValue();
1944      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1945      if (RegPressure[RCId] >= RegLimit[RCId])
1946        ++PDiff;
1947    }
1948  }
1949  const SDNode *N = SU->getNode();
1950
1951  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
1952    return PDiff;
1953
1954  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1955  for (unsigned i = 0; i != NumDefs; ++i) {
1956    EVT VT = N->getValueType(i);
1957    if (!N->hasAnyUseOfValue(i))
1958      continue;
1959    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1960    if (RegPressure[RCId] >= RegLimit[RCId])
1961      --PDiff;
1962  }
1963  return PDiff;
1964}
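
// A worked example (hypothetical counts): if SU reads two values whose
// defining preds still have NumRegDefsLeft != 0 in a class already at its
// limit, and SU itself defines one value in that same class, then
// PDiff = +2 - 1 = +1, i.e. scheduling SU is a net pressure increase, while
// LiveUses counts only the operands whose defs are already fully live.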
1965
1966void RegReductionPQBase::ScheduledNode(SUnit *SU) {
1967  if (!TracksRegPressure)
1968    return;
1969
1970  if (!SU->getNode())
1971    return;
1972
1973  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1974       I != E; ++I) {
1975    if (I->isCtrl())
1976      continue;
1977    SUnit *PredSU = I->getSUnit();
1978    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1979    // to cover the number of registers defined (they are all live).
1980    if (PredSU->NumRegDefsLeft == 0) {
1981      continue;
1982    }
1983    // FIXME: The ScheduleDAG currently loses information about which of a
1984    // node's values is consumed by each dependence. Consequently, if the node
1985    // defines multiple register classes, we don't know which to pressurize
1986    // here. Instead the following loop consumes the register defs in an
1987    // arbitrary order. At least it handles the common case of clustered loads
1988    // to the same class. For precise liveness, each SDep needs to indicate the
1989    // result number. But that tightly couples the ScheduleDAG with the
1990    // SelectionDAG making updates tricky. A simpler hack would be to attach a
1991    // value type or register class to SDep.
1992    //
1993    // The most important aspect of register tracking is balancing the increase
1994    // here with the reduction further below. Note that this SU may use multiple
1995    // defs in PredSU. They can't be determined here, but we've already
1996    // compensated by reducing NumRegDefsLeft in PredSU during
1997    // ScheduleDAGSDNodes::AddSchedEdges.
1998    --PredSU->NumRegDefsLeft;
1999    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2000    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2001         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2002      if (SkipRegDefs)
2003        continue;
2004
2005      unsigned RCId, Cost;
2006      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
2007      RegPressure[RCId] += Cost;
2008      break;
2009    }
2010  }
2011
2012  // We should have this assert, but there may be dead SDNodes that never
2013  // materialize as SUnits, so they don't appear to generate liveness.
2014  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2015  int SkipRegDefs = (int)SU->NumRegDefsLeft;
2016  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2017       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2018    if (SkipRegDefs > 0)
2019      continue;
2020    unsigned RCId, Cost;
2021    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
2022    if (RegPressure[RCId] < Cost) {
2023      // Register pressure tracking is imprecise. This can happen. But we try
2024      // hard not to let it happen because it likely results in poor scheduling.
2025      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
2026      RegPressure[RCId] = 0;
2027    }
2028    else {
2029      RegPressure[RCId] -= Cost;
2030    }
2031  }
2032  dumpRegPressure();
2033}
2034
2035void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
2036  if (!TracksRegPressure)
2037    return;
2038
2039  const SDNode *N = SU->getNode();
2040  if (!N) return;
2041
2042  if (!N->isMachineOpcode()) {
2043    if (N->getOpcode() != ISD::CopyToReg)
2044      return;
2045  } else {
2046    unsigned Opc = N->getMachineOpcode();
2047    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2048        Opc == TargetOpcode::INSERT_SUBREG ||
2049        Opc == TargetOpcode::SUBREG_TO_REG ||
2050        Opc == TargetOpcode::REG_SEQUENCE ||
2051        Opc == TargetOpcode::IMPLICIT_DEF)
2052      return;
2053  }
2054
2055  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2056       I != E; ++I) {
2057    if (I->isCtrl())
2058      continue;
2059    SUnit *PredSU = I->getSUnit();
2060    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2061    // counts data deps.
2062    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2063      continue;
2064    const SDNode *PN = PredSU->getNode();
2065    if (!PN->isMachineOpcode()) {
2066      if (PN->getOpcode() == ISD::CopyFromReg) {
2067        EVT VT = PN->getValueType(0);
2068        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2069        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2070      }
2071      continue;
2072    }
2073    unsigned POpc = PN->getMachineOpcode();
2074    if (POpc == TargetOpcode::IMPLICIT_DEF)
2075      continue;
2076    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2077        POpc == TargetOpcode::INSERT_SUBREG ||
2078        POpc == TargetOpcode::SUBREG_TO_REG) {
2079      EVT VT = PN->getValueType(0);
2080      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2081      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2082      continue;
2083    }
2084    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2085    for (unsigned i = 0; i != NumDefs; ++i) {
2086      EVT VT = PN->getValueType(i);
2087      if (!PN->hasAnyUseOfValue(i))
2088        continue;
2089      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2090      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2091        // Register pressure tracking is imprecise. This can happen.
2092        RegPressure[RCId] = 0;
2093      else
2094        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2095    }
2096  }
2097
2098  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2099  // may transfer data dependencies to CopyToReg.
2100  if (SU->NumSuccs && N->isMachineOpcode()) {
2101    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2102    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2103      EVT VT = N->getValueType(i);
2104      if (VT == MVT::Glue || VT == MVT::Other)
2105        continue;
2106      if (!N->hasAnyUseOfValue(i))
2107        continue;
2108      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2109      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2110    }
2111  }
2112
2113  dumpRegPressure();
2114}
2115
2116//===----------------------------------------------------------------------===//
2117//           Dynamic Node Priority for Register Pressure Reduction
2118//===----------------------------------------------------------------------===//
2119
2120/// closestSucc - Returns the scheduled cycle of the successor which is
2121/// closest to the current cycle.
2122static unsigned closestSucc(const SUnit *SU) {
2123  unsigned MaxHeight = 0;
2124  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2125       I != E; ++I) {
2126    if (I->isCtrl()) continue;  // ignore chain succs
2127    unsigned Height = I->getSUnit()->getHeight();
2128    // If there are a bunch of CopyToRegs stacked up, they should be considered
2129    // to be at the same position.
2130    if (I->getSUnit()->getNode() &&
2131        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2132      Height = closestSucc(I->getSUnit())+1;
2133    if (Height > MaxHeight)
2134      MaxHeight = Height;
2135  }
2136  return MaxHeight;
2137}
2138
2139/// calcMaxScratches - Returns a cost estimate of the worst case requirement
2140/// for scratch registers, i.e. the number of data dependencies.
2141static unsigned calcMaxScratches(const SUnit *SU) {
2142  unsigned Scratches = 0;
2143  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2144       I != E; ++I) {
2145    if (I->isCtrl()) continue;  // ignore chain preds
2146    Scratches++;
2147  }
2148  return Scratches;
2149}
2150
2151/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2152/// CopyFromReg from a virtual register.
2153static bool hasOnlyLiveInOpers(const SUnit *SU) {
2154  bool RetVal = false;
2155  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2156       I != E; ++I) {
2157    if (I->isCtrl()) continue;
2158    const SUnit *PredSU = I->getSUnit();
2159    if (PredSU->getNode() &&
2160        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2161      unsigned Reg =
2162        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2163      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2164        RetVal = true;
2165        continue;
2166      }
2167    }
2168    return false;
2169  }
2170  return RetVal;
2171}
2172
2173/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2174/// CopyToReg to a virtual register. This SU def is probably a liveout and
2175/// it has no other use. It should be scheduled closer to the terminator.
2176static bool hasOnlyLiveOutUses(const SUnit *SU) {
2177  bool RetVal = false;
2178  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2179       I != E; ++I) {
2180    if (I->isCtrl()) continue;
2181    const SUnit *SuccSU = I->getSUnit();
2182    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2183      unsigned Reg =
2184        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2185      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2186        RetVal = true;
2187        continue;
2188      }
2189    }
2190    return false;
2191  }
2192  return RetVal;
2193}
2194
2195// Set isVRegCycle for a node with only live-in operands and live-out uses.
2196// Also set isVRegCycle for its CopyFromReg operands.
2197//
2198// This is only relevant for single-block loops, in which case the VRegCycle
2199// node is likely an induction variable in which the operand and target virtual
2200// registers should be coalesced (e.g. pre/post increment values). Setting the
2201// isVRegCycle flag helps the scheduler prioritize other uses of the same
2202// CopyFromReg so that this node becomes the virtual register "kill". This
2203// avoids interference between the values live in and out of the block and
2204// eliminates a copy inside the loop.
2205static void initVRegCycle(SUnit *SU) {
2206  if (DisableSchedVRegCycle)
2207    return;
2208
2209  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2210    return;
2211
2212  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2213
2214  SU->isVRegCycle = true;
2215
2216  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2217       I != E; ++I) {
2218    if (I->isCtrl()) continue;
2219    I->getSUnit()->isVRegCycle = true;
2220  }
2221}
2222
2223// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2224// CopyFromReg operands. We should no longer penalize other uses of this VReg.
2225static void resetVRegCycle(SUnit *SU) {
2226  if (!SU->isVRegCycle)
2227    return;
2228
2229  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2230       I != E; ++I) {
2231    if (I->isCtrl()) continue;  // ignore chain preds
2232    SUnit *PredSU = I->getSUnit();
2233    if (PredSU->isVRegCycle) {
2234      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2235             "VRegCycle def must be CopyFromReg");
2236      I->getSUnit()->isVRegCycle = 0;
2237    }
2238  }
2239}
2240
2241// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2242// means a node that defines the VRegCycle has not been scheduled yet.
2243static bool hasVRegCycleUse(const SUnit *SU) {
2244  // If this SU also defines the VReg, don't hoist it as a "use".
2245  if (SU->isVRegCycle)
2246    return false;
2247
2248  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2249       I != E; ++I) {
2250    if (I->isCtrl()) continue;  // ignore chain preds
2251    if (I->getSUnit()->isVRegCycle &&
2252        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2253      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
2254      return true;
2255    }
2256  }
2257  return false;
2258}
2259
2260// Check for either a dependence (latency) or resource (hazard) stall.
2261//
2262// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2263static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2264  if ((int)SPQ->getCurCycle() < Height) return true;
2265  if (SPQ->getHazardRec()->getHazardType(SU, 0)
2266      != ScheduleHazardRecognizer::NoHazard)
2267    return true;
2268  return false;
2269}
2270
2271// Return -1 if left has higher priority, 1 if right has higher priority.
2272// Return 0 if latency-based priority is equivalent.
2273static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2274                            RegReductionPQBase *SPQ) {
2275  // Scheduling an instruction that uses a VReg whose postincrement has not yet
2276  // been scheduled will induce a copy. Model this as an extra cycle of latency.
2277  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2278  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2279  int LHeight = (int)left->getHeight() + LPenalty;
2280  int RHeight = (int)right->getHeight() + RPenalty;
2281
2282  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2283    BUHasStall(left, LHeight, SPQ);
2284  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2285    BUHasStall(right, RHeight, SPQ);
2286
2287  // If scheduling one of the nodes will cause a pipeline stall, delay it.
2288  // If scheduling both of the nodes will cause pipeline stalls, sort them
2289  // according to their height.
2290  if (LStall) {
2291    if (!RStall) {
2292      DEBUG(++FactorCount[FactStall]);
2293      return 1;
2294    }
2295    if (LHeight != RHeight) {
2296      DEBUG(++FactorCount[FactStall]);
2297      return LHeight > RHeight ? 1 : -1;
2298    }
2299  } else if (RStall) {
2300    DEBUG(++FactorCount[FactStall]);
2301    return -1;
2302  }
2303
2304  // If either node is scheduling for latency, sort them by height/depth
2305  // and latency.
2306  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2307                     right->SchedulingPref == Sched::ILP)) {
2308    if (DisableSchedCycles) {
2309      if (LHeight != RHeight) {
2310        DEBUG(++FactorCount[FactHeight]);
2311        return LHeight > RHeight ? 1 : -1;
2312      }
2313    }
2314    else {
2315      // If neither instruction stalls (!LStall && !RStall) then their
2316      // heights are already covered, so only their depths matter. We also
2317      // reach this point if both stall but have the same height.
2318      int LDepth = left->getDepth() - LPenalty;
2319      int RDepth = right->getDepth() - RPenalty;
2320      if (LDepth != RDepth) {
2321        DEBUG(++FactorCount[FactDepth]);
2322        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
2323              << ") depth " << LDepth << " vs SU (" << right->NodeNum
2324              << ") depth " << RDepth << "\n");
2325        return LDepth < RDepth ? 1 : -1;
2326      }
2327    }
2328    if (left->Latency != right->Latency) {
2329      DEBUG(++FactorCount[FactOther]);
2330      return left->Latency > right->Latency ? 1 : -1;
2331    }
2332  }
2333  return 0;
2334}
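
// An illustrative comparison (assumed values): with CurCycle = 3, a left node
// of height 5 stalls while a right node of height 2 does not, so the function
// returns 1 and right is preferred. Had both stalled, their heights would
// break the tie, and the depth/latency comparisons above decide the rest.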
2335
2336static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2337  // Schedule physical register definitions close to their use. This is
2338  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2339  // long as shortening physreg live ranges is generally good, we can defer
2340  // creating a subtarget hook.
2341  if (!DisableSchedPhysRegJoin) {
2342    bool LHasPhysReg = left->hasPhysRegDefs;
2343    bool RHasPhysReg = right->hasPhysRegDefs;
2344    if (LHasPhysReg != RHasPhysReg) {
2345      DEBUG(++FactorCount[FactRegUses]);
2346      #ifndef NDEBUG
2347      const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
2348      #endif
2349      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
2350            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2351            << PhysRegMsg[RHasPhysReg] << "\n");
2352      return LHasPhysReg < RHasPhysReg;
2353    }
2354  }
2355
2356  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2357  unsigned LPriority = SPQ->getNodePriority(left);
2358  unsigned RPriority = SPQ->getNodePriority(right);
2359
2360  // Be really careful about hoisting call operands above previous calls.
2361  // Only allow it if it would reduce register pressure.
2362  if (left->isCall && right->isCallOp) {
2363    unsigned RNumVals = right->getNode()->getNumValues();
2364    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2365  }
2366  if (right->isCall && left->isCallOp) {
2367    unsigned LNumVals = left->getNode()->getNumValues();
2368    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2369  }
2370
2371  if (LPriority != RPriority) {
2372    DEBUG(++FactorCount[FactStatic]);
2373    return LPriority > RPriority;
2374  }
2375
2376  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2377  // the same, then keep the source order.
2378  if (left->isCall || right->isCall) {
2379    unsigned LOrder = SPQ->getNodeOrdering(left);
2380    unsigned ROrder = SPQ->getNodeOrdering(right);
2381
2382    // Prefer an ordering where a lower non-zero order number means a higher
2383    // preference.
2384    if ((LOrder || ROrder) && LOrder != ROrder)
2385      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2386  }
2387
2388  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
2389  // e.g.
2390  // t1 = op t2, c1
2391  // t3 = op t4, c2
2392  //
2393  // and the following instructions are both ready.
2394  // t2 = op c3
2395  // t4 = op c4
2396  //
2397  // Then schedule t2 = op first.
2398  // i.e.
2399  // t4 = op c4
2400  // t2 = op c3
2401  // t1 = op t2, c1
2402  // t3 = op t4, c2
2403  //
2404  // This creates more short live intervals.
2405  unsigned LDist = closestSucc(left);
2406  unsigned RDist = closestSucc(right);
2407  if (LDist != RDist) {
2408    DEBUG(++FactorCount[FactOther]);
2409    return LDist < RDist;
2410  }
2411
2412  // How many registers become live when the node is scheduled.
2413  unsigned LScratch = calcMaxScratches(left);
2414  unsigned RScratch = calcMaxScratches(right);
2415  if (LScratch != RScratch) {
2416    DEBUG(++FactorCount[FactOther]);
2417    return LScratch > RScratch;
2418  }
2419
2420  // Comparing latency against a call makes little sense unless the node
2421  // is register pressure-neutral.
2422  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2423    return (left->NodeQueueId > right->NodeQueueId);
2424
2425  // Do not compare latencies when one or both of the nodes are calls.
2426  if (!DisableSchedCycles &&
2427      !(left->isCall || right->isCall)) {
2428    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2429    if (result != 0)
2430      return result > 0;
2431  }
2432  else {
2433    if (left->getHeight() != right->getHeight()) {
2434      DEBUG(++FactorCount[FactHeight]);
2435      return left->getHeight() > right->getHeight();
2436    }
2437
2438    if (left->getDepth() != right->getDepth()) {
2439      DEBUG(++FactorCount[FactDepth]);
2440      return left->getDepth() < right->getDepth();
2441    }
2442  }
2443
2444  assert(left->NodeQueueId && right->NodeQueueId &&
2445         "NodeQueueId cannot be zero");
2446  DEBUG(++FactorCount[FactOther]);
2447  return (left->NodeQueueId > right->NodeQueueId);
2448}
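
// Summarizing the cascade above: physreg defs are pulled toward their uses,
// then Sethi-Ullman priority (adjusted for call operands) decides, then
// source order for calls, then closest-successor distance, then the scratch
// (data dependence) count, then latency, and finally NodeQueueId as a
// FIFO-like tie-break.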
2449
2450// Bottom up
2451bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2452  if (int res = checkSpecialNodes(left, right))
2453    return res > 0;
2454
2455  return BURRSort(left, right, SPQ);
2456}
2457
2458// Source order, otherwise bottom up.
2459bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2460  if (int res = checkSpecialNodes(left, right))
2461    return res > 0;
2462
2463  unsigned LOrder = SPQ->getNodeOrdering(left);
2464  unsigned ROrder = SPQ->getNodeOrdering(right);
2465
2466  // Prefer an ordering where a lower non-zero order number means a higher
2467  // preference.
2468  if ((LOrder || ROrder) && LOrder != ROrder)
2469    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2470
2471  return BURRSort(left, right, SPQ);
2472}
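
// For example (hypothetical order numbers): with LOrder = 7 and ROrder = 3
// the expression above yields false, while LOrder = 7 and ROrder = 0 yields
// true, since an order number of zero carries no source position at all.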
2473
2474// If the time between now and when the instruction will be ready can cover
2475// the spill code, then avoid adding it to the ready queue. This gives long
2476// stalls highest priority and allows hoisting across calls. It should also
2477// speed up processing the available queue.
2478bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2479  static const unsigned ReadyDelay = 3;
2480
2481  if (SPQ->MayReduceRegPressure(SU)) return true;
2482
2483  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2484
2485  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2486      != ScheduleHazardRecognizer::NoHazard)
2487    return false;
2488
2489  return true;
2490}
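
// For instance (assumed cycle numbers): with CurCycle = 2 and ReadyDelay = 3,
// an SU of height 7 exceeds 2 + 3 and stays pending, while an SU of height 4
// becomes available as long as the hazard recognizer reports NoHazard.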
2491
2492// Return true if right should be scheduled with higher priority than left.
2493bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2494  if (int res = checkSpecialNodes(left, right))
2495    return res > 0;
2496
2497  if (left->isCall || right->isCall)
2498    // No way to compute latency of calls.
2499    return BURRSort(left, right, SPQ);
2500
2501  bool LHigh = SPQ->HighRegPressure(left);
2502  bool RHigh = SPQ->HighRegPressure(right);
2503  // Avoid causing spills. If register pressure is high, schedule for
2504  // register pressure reduction.
2505  if (LHigh && !RHigh) {
2506    DEBUG(++FactorCount[FactPressureDiff]);
2507    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
2508          << right->NodeNum << ")\n");
2509    return true;
2510  }
2511  else if (!LHigh && RHigh) {
2512    DEBUG(++FactorCount[FactPressureDiff]);
2513    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
2514          << left->NodeNum << ")\n");
2515    return false;
2516  }
2517  if (!LHigh && !RHigh) {
2518    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2519    if (result != 0)
2520      return result > 0;
2521  }
2522  return BURRSort(left, right, SPQ);
2523}
2524
2525// Schedule as many instructions in each cycle as possible. So don't make an
2526// instruction available unless it is ready in the current cycle.
2527bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2528  if (SU->getHeight() > CurCycle) return false;
2529
2530  if (SPQ->getHazardRec()->getHazardType(SU, 0)
2531      != ScheduleHazardRecognizer::NoHazard)
2532    return false;
2533
2534  return true;
2535}
2536
2537static bool canEnableCoalescing(SUnit *SU) {
2538  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2539  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2540    // CopyToReg should be close to its uses to facilitate coalescing and
2541    // avoid spilling.
2542    return true;
2543
2544  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2545      Opc == TargetOpcode::SUBREG_TO_REG ||
2546      Opc == TargetOpcode::INSERT_SUBREG)
2547    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2548    // close to their uses to facilitate coalescing.
2549    return true;
2550
2551  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2552    // If SU does not have a register def, schedule it close to its uses
2553    // because it does not lengthen any live ranges.
2554    return true;
2555
2556  return false;
2557}
2558
2559// list-ilp is currently an experimental scheduler that allows various
2560// heuristics to be enabled prior to the normal register reduction logic.
2561bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2562  if (int res = checkSpecialNodes(left, right))
2563    return res > 0;
2564
2565  if (left->isCall || right->isCall)
2566    // No way to compute latency of calls.
2567    return BURRSort(left, right, SPQ);
2568
2569  unsigned LLiveUses = 0, RLiveUses = 0;
2570  int LPDiff = 0, RPDiff = 0;
2571  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2572    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2573    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2574  }
2575  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2576    DEBUG(++FactorCount[FactPressureDiff]);
2577    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2578          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2579    return LPDiff > RPDiff;
2580  }
2581
2582  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2583    bool LReduce = canEnableCoalescing(left);
2584    bool RReduce = canEnableCoalescing(right);
2585    DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
2586    if (LReduce && !RReduce) return false;
2587    if (RReduce && !LReduce) return true;
2588  }
2589
2590  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2591    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2592          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2593    DEBUG(++FactorCount[FactRegUses]);
2594    return LLiveUses < RLiveUses;
2595  }
2596
2597  if (!DisableSchedStalls) {
2598    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2599    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2600    if (LStall != RStall) {
2601      DEBUG(++FactorCount[FactHeight]);
2602      return left->getHeight() > right->getHeight();
2603    }
2604  }
2605
2606  if (!DisableSchedCriticalPath) {
2607    int spread = (int)left->getDepth() - (int)right->getDepth();
2608    if (std::abs(spread) > MaxReorderWindow) {
2609      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2610            << left->getDepth() << " != SU(" << right->NodeNum << "): "
2611            << right->getDepth() << "\n");
2612      DEBUG(++FactorCount[FactDepth]);
2613      return left->getDepth() < right->getDepth();
2614    }
2615  }
2616
2617  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2618    int spread = (int)left->getHeight() - (int)right->getHeight();
2619    if (std::abs(spread) > MaxReorderWindow) {
2620      DEBUG(++FactorCount[FactHeight]);
2621      return left->getHeight() > right->getHeight();
2622    }
2623  }
2624
2625  return BURRSort(left, right, SPQ);
2626}
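
// The net effect of the checks above: each experimental heuristic (pressure
// diff, live uses, stalls, critical path, height) can be disabled
// independently through its cl::opt flag, and anything left undecided falls
// through to the standard BURRSort comparison.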
2627
2628void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2629  SUnits = &sunits;
2630  // Add pseudo dependency edges for two-address nodes.
2631  AddPseudoTwoAddrDeps();
2632  // Reroute edges to nodes with multiple uses.
2633  if (!TracksRegPressure)
2634    PrescheduleNodesWithMultipleUses();
2635  // Calculate node priorities.
2636  CalculateSethiUllmanNumbers();
2637
2638  // For single block loops, mark nodes that look like canonical IV increments.
2639  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2640    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2641      initVRegCycle(&sunits[i]);
2642    }
2643  }
2644}
2645
2646//===----------------------------------------------------------------------===//
2647//                    Preschedule for Register Pressure
2648//===----------------------------------------------------------------------===//
2649
2650bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2651  if (SU->isTwoAddress) {
2652    unsigned Opc = SU->getNode()->getMachineOpcode();
2653    const MCInstrDesc &MCID = TII->get(Opc);
2654    unsigned NumRes = MCID.getNumDefs();
2655    unsigned NumOps = MCID.getNumOperands() - NumRes;
2656    for (unsigned i = 0; i != NumOps; ++i) {
2657      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2658        SDNode *DU = SU->getNode()->getOperand(i).getNode();
2659        if (DU->getNodeId() != -1 &&
2660            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2661          return true;
2662      }
2663    }
2664  }
2665  return false;
2666}
2667
2668/// canClobberReachingPhysRegUse - True if SU would clobber one of its
2669/// successors' explicit physregs whose definition can reach DepSU.
2670/// i.e. DepSU should not be scheduled above SU.
2671static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2672                                         ScheduleDAGRRList *scheduleDAG,
2673                                         const TargetInstrInfo *TII,
2674                                         const TargetRegisterInfo *TRI) {
2675  const unsigned *ImpDefs
2676    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2677  if (!ImpDefs)
2678    return false;
2679
2680  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
2681       SI != SE; ++SI) {
2682    SUnit *SuccSU = SI->getSUnit();
2683    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
2684           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
2685      if (!PI->isAssignedRegDep())
2686        continue;
2687
2688      for (const unsigned *ImpDef = ImpDefs; *ImpDef; ++ImpDef) {
2689        // Return true if SU clobbers this physical register use and the
2690        // definition of the register reaches from DepSU. IsReachable queries a
2691        // topological forward sort of the DAG (following the successors).
2692        if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
2693            scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2694          return true;
2695      }
2696    }
2697  }
2698  return false;
2699}
2700
2701/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2702/// physical register defs.
2703static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2704                                  const TargetInstrInfo *TII,
2705                                  const TargetRegisterInfo *TRI) {
2706  SDNode *N = SuccSU->getNode();
2707  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2708  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2709  assert(ImpDefs && "Caller should check hasPhysRegDefs");
2710  for (const SDNode *SUNode = SU->getNode(); SUNode;
2711       SUNode = SUNode->getGluedNode()) {
2712    if (!SUNode->isMachineOpcode())
2713      continue;
2714    const unsigned *SUImpDefs =
2715      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2716    if (!SUImpDefs)
2717      return false;
2718    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2719      EVT VT = N->getValueType(i);
2720      if (VT == MVT::Glue || VT == MVT::Other)
2721        continue;
2722      if (!N->hasAnyUseOfValue(i))
2723        continue;
2724      unsigned Reg = ImpDefs[i - NumDefs];
2725      for (;*SUImpDefs; ++SUImpDefs) {
2726        unsigned SUReg = *SUImpDefs;
2727        if (TRI->regsOverlap(Reg, SUReg))
2728          return true;
2729      }
2730    }
2731  }
2732  return false;
2733}
2734
2735/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2736/// are not handled well by the general register pressure reduction
2737/// heuristics. When presented with code like this:
2738///
2739///      N
2740///    / |
2741///   /  |
2742///  U  store
2743///  |
2744/// ...
2745///
2746/// the heuristics tend to push the store up, but since the
2747/// operand of the store has another use (U), this would increase
2748/// the length of that other use (the U->N edge).
2749///
2750/// This function transforms code like the above to route U's
2751/// dependence through the store when possible, like this:
2752///
2753///      N
2754///      ||
2755///      ||
2756///     store
2757///       |
2758///       U
2759///       |
2760///      ...
2761///
2762/// This results in the store being scheduled immediately
2763/// after N, which shortens the U->N live range, reducing
2764/// register pressure.
2765///
2766void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2767  // Visit all the nodes in topological order, working top-down.
2768  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2769    SUnit *SU = &(*SUnits)[i];
2770    // For now, only look at nodes with no data successors, such as stores.
2771    // These are especially important, due to the heuristics in
2772    // getNodePriority for nodes with no data successors.
2773    if (SU->NumSuccs != 0)
2774      continue;
2775    // For now, only look at nodes with exactly one data predecessor.
2776    if (SU->NumPreds != 1)
2777      continue;
2778    // Avoid prescheduling copies to virtual registers, which don't behave
2779    // like other nodes from the perspective of scheduling heuristics.
2780    if (SDNode *N = SU->getNode())
2781      if (N->getOpcode() == ISD::CopyToReg &&
2782          TargetRegisterInfo::isVirtualRegister
2783            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2784        continue;
2785
2786    // Locate the single data predecessor.
2787    SUnit *PredSU = 0;
2788    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2789         EE = SU->Preds.end(); II != EE; ++II)
2790      if (!II->isCtrl()) {
2791        PredSU = II->getSUnit();
2792        break;
2793      }
2794    assert(PredSU);
2795
2796    // Don't rewrite edges that carry physregs, because that requires additional
2797    // support infrastructure.
2798    if (PredSU->hasPhysRegDefs)
2799      continue;
2800    // Short-circuit the case where SU is PredSU's only data successor.
2801    if (PredSU->NumSuccs == 1)
2802      continue;
2803    // Avoid prescheduling to copies from virtual registers, which don't behave
2804    // like other nodes from the perspective of scheduling heuristics.
2805    if (SDNode *N = SU->getNode())
2806      if (N->getOpcode() == ISD::CopyFromReg &&
2807          TargetRegisterInfo::isVirtualRegister
2808            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2809        continue;
2810
2811    // Perform checks on the successors of PredSU.
2812    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2813         EE = PredSU->Succs.end(); II != EE; ++II) {
2814      SUnit *PredSuccSU = II->getSUnit();
2815      if (PredSuccSU == SU) continue;
2816      // If PredSU has another successor with no data successors, for
2817      // now don't attempt to choose either over the other.
2818      if (PredSuccSU->NumSuccs == 0)
2819        goto outer_loop_continue;
2820      // Don't break physical register dependencies.
2821      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2822        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2823          goto outer_loop_continue;
2824      // Don't introduce graph cycles.
2825      if (scheduleDAG->IsReachable(SU, PredSuccSU))
2826        goto outer_loop_continue;
2827    }
2828
2829    // Ok, the transformation is safe and the heuristics suggest it is
2830    // profitable. Update the graph.
2831    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
2832                 << " next to PredSU #" << PredSU->NodeNum
2833                 << " to guide scheduling in the presence of multiple uses\n");
2834    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2835      SDep Edge = PredSU->Succs[i];
2836      assert(!Edge.isAssignedRegDep());
2837      SUnit *SuccSU = Edge.getSUnit();
2838      if (SuccSU != SU) {
2839        Edge.setSUnit(PredSU);
2840        scheduleDAG->RemovePred(SuccSU, Edge);
2841        scheduleDAG->AddPred(SU, Edge);
2842        Edge.setSUnit(SU);
2843        scheduleDAG->AddPred(SuccSU, Edge);
2844        --i;
2845      }
2846    }
2847  outer_loop_continue:;
2848  }
2849}
2850
2851/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2852/// it as a def&use operand, add a pseudo control edge from it to the other
2853/// node (if it won't create a cycle) so the two-address one will be scheduled
2854/// first (lower in the schedule). If both nodes are two-address, favor the
2855/// one that has a CopyToReg use (more likely to be a loop induction update).
2856/// If both are two-address, but one is commutable while the other is not
2857/// commutable, favor the one that's not commutable.
2858void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2859  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2860    SUnit *SU = &(*SUnits)[i];
2861    if (!SU->isTwoAddress)
2862      continue;
2863
2864    SDNode *Node = SU->getNode();
2865    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2866      continue;
2867
2868    bool isLiveOut = hasOnlyLiveOutUses(SU);
2869    unsigned Opc = Node->getMachineOpcode();
2870    const MCInstrDesc &MCID = TII->get(Opc);
2871    unsigned NumRes = MCID.getNumDefs();
2872    unsigned NumOps = MCID.getNumOperands() - NumRes;
2873    for (unsigned j = 0; j != NumOps; ++j) {
2874      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
2875        continue;
2876      SDNode *DU = SU->getNode()->getOperand(j).getNode();
2877      if (DU->getNodeId() == -1)
2878        continue;
2879      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2880      if (!DUSU) continue;
2881      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2882           E = DUSU->Succs.end(); I != E; ++I) {
2883        if (I->isCtrl()) continue;
2884        SUnit *SuccSU = I->getSUnit();
2885        if (SuccSU == SU)
2886          continue;
2887        // Be conservative. Ignore if nodes aren't at roughly the same
2888        // depth and height.
2889        if (SuccSU->getHeight() < SU->getHeight() &&
2890            (SU->getHeight() - SuccSU->getHeight()) > 1)
2891          continue;
2892        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2893        // constrains whatever is using the copy, instead of the copy
2894        // itself. In the case that the copy is coalesced, this
2895        // preserves the intent of the pseudo two-address heurietics.
2896        // preserves the intent of the pseudo two-address heuristics.
2897               SuccSU->getNode()->isMachineOpcode() &&
2898               SuccSU->getNode()->getMachineOpcode() ==
2899                 TargetOpcode::COPY_TO_REGCLASS)
2900          SuccSU = SuccSU->Succs.front().getSUnit();
2901        // Don't constrain non-instruction nodes.
2902        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2903          continue;
2904        // Don't constrain nodes with physical register defs if the
2905        // predecessor can clobber them.
2906        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2907          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2908            continue;
2909        }
2910        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2911        // these may be coalesced away. We want them close to their uses.
2912        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2913        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2914            SuccOpc == TargetOpcode::INSERT_SUBREG ||
2915            SuccOpc == TargetOpcode::SUBREG_TO_REG)
2916          continue;
2917        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
2918            (!canClobber(SuccSU, DUSU) ||
2919             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2920             (!SU->isCommutable && SuccSU->isCommutable)) &&
2921            !scheduleDAG->IsReachable(SuccSU, SU)) {
2922          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
2923                       << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2924          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
2925                                        /*Reg=*/0, /*isNormalMemory=*/false,
2926                                        /*isMustAlias=*/false,
2927                                        /*isArtificial=*/true));
2928        }
2929      }
2930    }
2931  }
2932}
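
// A sketch of the intent (hypothetical machine code): if "r1 = add r1, r2"
// is two-address and "r3 = sub r1, r4" also reads r1, the artificial edge
// added above places the add below the sub in the final schedule, so the
// add's tied def clobbers r1 only after every other reader is done, avoiding
// a register copy.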
2933
2934//===----------------------------------------------------------------------===//
2935//                         Public Constructor Functions
2936//===----------------------------------------------------------------------===//
2937
2938llvm::ScheduleDAGSDNodes *
2939llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2940                                 CodeGenOpt::Level OptLevel) {
2941  const TargetMachine &TM = IS->TM;
2942  const TargetInstrInfo *TII = TM.getInstrInfo();
2943  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2944
2945  BURegReductionPriorityQueue *PQ =
2946    new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2947  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2948  PQ->setScheduleDAG(SD);
2949  return SD;
2950}
2951
2952llvm::ScheduleDAGSDNodes *
2953llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2954                                   CodeGenOpt::Level OptLevel) {
2955  const TargetMachine &TM = IS->TM;
2956  const TargetInstrInfo *TII = TM.getInstrInfo();
2957  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2958
2959  SrcRegReductionPriorityQueue *PQ =
2960    new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2961  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2962  PQ->setScheduleDAG(SD);
2963  return SD;
2964}
2965
2966llvm::ScheduleDAGSDNodes *
2967llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
2968                                   CodeGenOpt::Level OptLevel) {
2969  const TargetMachine &TM = IS->TM;
2970  const TargetInstrInfo *TII = TM.getInstrInfo();
2971  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2972  const TargetLowering *TLI = &IS->getTargetLowering();
2973
2974  HybridBURRPriorityQueue *PQ =
2975    new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2976
2977  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2978  PQ->setScheduleDAG(SD);
2979  return SD;
2980}
2981
2982llvm::ScheduleDAGSDNodes *
2983llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
2984                                CodeGenOpt::Level OptLevel) {
2985  const TargetMachine &TM = IS->TM;
2986  const TargetInstrInfo *TII = TM.getInstrInfo();
2987  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2988  const TargetLowering *TLI = &IS->getTargetLowering();
2989
2990  ILPBURRPriorityQueue *PQ =
2991    new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2992  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2993  PQ->setScheduleDAG(SD);
2994  return SD;
2995}
2996