ScheduleDAGRRList.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation).  Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other node that modifies
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  // Collect interferences between physical register use/defs.
  // Each interference is an SUnit and set of physical registers.
  SmallVector<SUnit*, 4> Interferences;
  typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
  LRegsMapT LRegsMap;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits, NULL) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule() override;

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);

  void releaseInterferences(unsigned Reg = 0);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const override {
    return !NeedLatency;
  }
};
}  // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
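/// For example (illustrative), a REG_SEQUENCE node produces an MVT::Untyped
/// value, so its register class must be recovered from its DstRCIdx operand
/// rather than derived from the value type.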
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  MVT VT = RegDefPos.GetValue();

  // Special handling for untyped values.  These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();

    // Special handling for CopyFromReg of untyped values.
    if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Opcode = Node->getMachineOpcode();
    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
  CallSeqEndForStart.clear();
  assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Update the predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
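/// For example (illustrative), when climbing the chain from the outer
/// CALLSEQ_END of:
///
///   CALLSEQ_BEGIN (outer) ... CALLSEQ_BEGIN (inner) ... CALLSEQ_END (inner)
///   ... CALLSEQ_END (outer)
///
/// each CALLSEQ_END encountered increments NestLevel and each CALLSEQ_BEGIN
/// decrements it; the CALLSEQ_BEGIN that brings NestLevel back to zero is
/// the match.
///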
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue.  If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue.  If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
          continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
      releaseInterferences(I->getReg());
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
        releaseInterferences(CallResource);
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
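  // For example (illustrative), with -sched-avg-ipc=2 and the hazard
  // recognizer disabled, the cycle advances only once two machine nodes have
  // been issued in the current cycle.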
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// and its predecessors' state to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
      releaseInterferences(I->getReg());
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
        releaseInterferences(CallResource);
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
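/// This is done by replaying the most recently scheduled nodes (up to the
/// recognizer's maximum lookahead) into a freshly reset hazard state.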
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle by
/// unscheduling nodes up to and including BtSU, in order to schedule SU.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
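/// If the node has a folded load (an illustrative example: an x86 arithmetic
/// instruction with a memory operand), it is first unfolded into a separate
/// load and operation so that the two pieces can be scheduled independently.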
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    // Unfolding an x86 DEC64m operation results in a store, dec, and load,
    // which can't be handled here, so quit.
    if (NewNodes.size() == 3)
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value but
    // has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - If the specified register def of the specified SUnit
/// clobbers any "live" registers, add the interfering registers to LRegs.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if the alias is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
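/// (A register mask operand, typically attached to call nodes, compactly
/// encodes the set of physical registers clobbered by the node.)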
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVectorImpl<unsigned> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
        dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return NULL;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
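      // Inline asm operands are grouped: a flag word encoding the operand
      // kind and register count, followed by that many register operands;
      // the InlineAsm accessors below decode these flag words.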
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = Interferences.size(); i > 0; --i) {
    SUnit *SU = Interferences[i-1];
    LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
    if (Reg) {
      SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
      if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
        continue;
    }
    SU->isPending = false;
    // The interfering node may no longer be available due to backtracking.
    // Furthermore, it may have been made available again, in which case it is
    // now already in the AvailableQueue.
    if (SU->isAvailable && !SU->NodeQueueId) {
      DEBUG(dbgs() << "    Repushing SU #" << SU->NodeNum << '\n');
      AvailableQueue->push(SU);
    }
    if (i < Interferences.size())
      Interferences[i-1] = Interferences.back();
    Interferences.pop_back();
    LRegsMap.erase(LRegsPos);
  }
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SUnit *CurSU = AvailableQueue->empty() ? 0 : AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    DEBUG(dbgs() << "    Interfering reg " <<
          (LRegs[0] == TRI->getNumRegs() ? "CallResource"
           : TRI->getName(LRegs[0]))
           << " SU #" << CurSU->NodeNum << '\n');
    std::pair<LRegsMapT::iterator, bool> LRegsPair =
      LRegsMap.insert(std::make_pair(CurSU, LRegs));
    if (LRegsPair.second) {
      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      Interferences.push_back(CurSU);
    }
    else {
      assert(CurSU->isPending && "Interferences are pending");
      // Update the interference with current live regs.
      LRegsPair.first->second = LRegs;
    }
    CurSU = AvailableQueue->pop();
  }
  if (CurSU)
    return CurSU;

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      // BacktrackBottomUp mutates Interferences!
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
            << TrySU->NodeNum << ")\n");
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available.
      if (!TrySU->isAvailable)
        CurSU = AvailableQueue->pop();
      else {
        AvailableQueue->remove(TrySU);
        CurSU = TrySU;
      }
      // Interferences has been mutated. We must break.
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value, but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to
    // copy the value at all.
1452    SUnit *NewDef = 0;
1453    if (DestRC != RC) {
1454      NewDef = CopyAndMoveSuccessors(LRDef);
1455      if (!DestRC && !NewDef)
1456        report_fatal_error("Can't handle live physical register dependency!");
1457    }
1458    if (!NewDef) {
1459      // Issue copies, these can be expensive cross register class copies.
1460      SmallVector<SUnit*, 2> Copies;
1461      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1462      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
1463            << " to SU #" << Copies.front()->NodeNum << "\n");
1464      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
1465      NewDef = Copies.back();
1466    }
1467
1468    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
1469          << " to SU #" << TrySU->NodeNum << "\n");
1470    LiveRegDefs[Reg] = NewDef;
1471    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
1472    TrySU->isAvailable = false;
1473    CurSU = NewDef;
1474  }
1475  assert(CurSU && "Unable to resolve live physical register dependencies!");
1476  return CurSU;
1477}
1478
1479/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
1480/// schedulers.
1481void ScheduleDAGRRList::ListScheduleBottomUp() {
1482  // Release any predecessors of the special Exit node.
1483  ReleasePredecessors(&ExitSU);
1484
1485  // Add root to Available queue.
1486  if (!SUnits.empty()) {
1487    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1488    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1489    RootSU->isAvailable = true;
1490    AvailableQueue->push(RootSU);
1491  }
1492
1493  // While the Available queue is not empty, grab the node with the highest
1494  // priority. If it is not ready, put it back. Schedule the node.
1495  Sequence.reserve(SUnits.size());
1496  while (!AvailableQueue->empty() || !Interferences.empty()) {
1497    DEBUG(dbgs() << "\nExamining Available:\n";
1498          AvailableQueue->dump(this));
1499
1500    // Pick the best node to schedule taking all constraints into
1501    // consideration.
1502    SUnit *SU = PickNodeToScheduleBottomUp();
1503
1504    AdvancePastStalls(SU);
1505
1506    ScheduleNodeBottomUp(SU);
1507
1508    while (AvailableQueue->empty() && !PendingQueue.empty()) {
1509      // Advance the cycle to free resources. Skip ahead to the next ready SU.
1510      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1511      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1512    }
1513  }
1514
1515  // Reverse the order since this is a bottom-up scheduler.
1516  std::reverse(Sequence.begin(), Sequence.end());
1517
1518#ifndef NDEBUG
1519  VerifyScheduledSequence(/*isBottomUp=*/true);
1520#endif
1521}
1522
1523//===----------------------------------------------------------------------===//
1524//                RegReductionPriorityQueue Definition
1525//===----------------------------------------------------------------------===//
1526//
1527// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
1528// to reduce register pressure.
1529//
1530namespace {
1531class RegReductionPQBase;
1532
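// queue_sort - Common base of the priority comparators below. The default
// isReady accepts every node; a derived comparator's own isReady is only
// consulted when its HasReadyFilter flag is set.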
1533struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1534  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1535};
1536
1537#ifndef NDEBUG
1538template<class SF>
1539struct reverse_sort : public queue_sort {
1540  SF &SortFunc;
1541  reverse_sort(SF &sf) : SortFunc(sf) {}
1542
1543  bool operator()(SUnit* left, SUnit* right) const {
1544    // reverse left/right rather than simply !SortFunc(left, right)
1545    // to expose different paths in the comparison logic.
1546    return SortFunc(right, left);
1547  }
1548};
1549#endif // NDEBUG
1550
1551/// bu_ls_rr_sort - Priority function for the bottom-up register pressure
1552/// reduction scheduler.
1553struct bu_ls_rr_sort : public queue_sort {
1554  enum {
1555    IsBottomUp = true,
1556    HasReadyFilter = false
1557  };
1558
1559  RegReductionPQBase *SPQ;
1560  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1561
1562  bool operator()(SUnit* left, SUnit* right) const;
1563};
1564
1565// src_ls_rr_sort - Priority function for source order scheduler.
1566struct src_ls_rr_sort : public queue_sort {
1567  enum {
1568    IsBottomUp = true,
1569    HasReadyFilter = false
1570  };
1571
1572  RegReductionPQBase *SPQ;
1573  src_ls_rr_sort(RegReductionPQBase *spq)
1574    : SPQ(spq) {}
1575
1576  bool operator()(SUnit* left, SUnit* right) const;
1577};
1578
1579// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1580struct hybrid_ls_rr_sort : public queue_sort {
1581  enum {
1582    IsBottomUp = true,
1583    HasReadyFilter = false
1584  };
1585
1586  RegReductionPQBase *SPQ;
1587  hybrid_ls_rr_sort(RegReductionPQBase *spq)
1588    : SPQ(spq) {}
1589
1590  bool isReady(SUnit *SU, unsigned CurCycle) const;
1591
1592  bool operator()(SUnit* left, SUnit* right) const;
1593};
1594
1595// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1596// scheduler.
1597struct ilp_ls_rr_sort : public queue_sort {
1598  enum {
1599    IsBottomUp = true,
1600    HasReadyFilter = false
1601  };
1602
1603  RegReductionPQBase *SPQ;
1604  ilp_ls_rr_sort(RegReductionPQBase *spq)
1605    : SPQ(spq) {}
1606
1607  bool isReady(SUnit *SU, unsigned CurCycle) const;
1608
1609  bool operator()(SUnit* left, SUnit* right) const;
1610};
1611
1612class RegReductionPQBase : public SchedulingPriorityQueue {
1613protected:
1614  std::vector<SUnit*> Queue;
1615  unsigned CurQueueId;
1616  bool TracksRegPressure;
1617  bool SrcOrder;
1618
1619  // SUnits - The SUnits for the current graph.
1620  std::vector<SUnit> *SUnits;
1621
1622  MachineFunction &MF;
1623  const TargetInstrInfo *TII;
1624  const TargetRegisterInfo *TRI;
1625  const TargetLowering *TLI;
1626  ScheduleDAGRRList *scheduleDAG;
1627
1628  // SethiUllmanNumbers - The SethiUllman number for each node.
1629  std::vector<unsigned> SethiUllmanNumbers;
1630
1631  /// RegPressure - Tracking current reg pressure per register class.
1632  ///
1633  std::vector<unsigned> RegPressure;
1634
1635  /// RegLimit - Tracking the number of allocatable registers per register
1636  /// class.
1637  std::vector<unsigned> RegLimit;
1638
1639public:
1640  RegReductionPQBase(MachineFunction &mf,
1641                     bool hasReadyFilter,
1642                     bool tracksrp,
1643                     bool srcorder,
1644                     const TargetInstrInfo *tii,
1645                     const TargetRegisterInfo *tri,
1646                     const TargetLowering *tli)
1647    : SchedulingPriorityQueue(hasReadyFilter),
1648      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
1649      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
1650    if (TracksRegPressure) {
1651      unsigned NumRC = TRI->getNumRegClasses();
1652      RegLimit.resize(NumRC);
1653      RegPressure.resize(NumRC);
1654      std::fill(RegLimit.begin(), RegLimit.end(), 0);
1655      std::fill(RegPressure.begin(), RegPressure.end(), 0);
1656      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1657             E = TRI->regclass_end(); I != E; ++I)
1658        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
1659    }
1660  }
1661
1662  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1663    scheduleDAG = scheduleDag;
1664  }
1665
1666  ScheduleHazardRecognizer* getHazardRec() {
1667    return scheduleDAG->getHazardRec();
1668  }
1669
1670  void initNodes(std::vector<SUnit> &sunits) override;
1671
1672  void addNode(const SUnit *SU) override;
1673
1674  void updateNode(const SUnit *SU) override;
1675
1676  void releaseState() override {
1677    SUnits = 0;
1678    SethiUllmanNumbers.clear();
1679    std::fill(RegPressure.begin(), RegPressure.end(), 0);
1680  }
1681
1682  unsigned getNodePriority(const SUnit *SU) const;
1683
1684  unsigned getNodeOrdering(const SUnit *SU) const {
1685    if (!SU->getNode()) return 0;
1686
1687    return SU->getNode()->getIROrder();
1688  }
1689
1690  bool empty() const override { return Queue.empty(); }
1691
1692  void push(SUnit *U) override {
1693    assert(!U->NodeQueueId && "Node in the queue already");
1694    U->NodeQueueId = ++CurQueueId;
1695    Queue.push_back(U);
1696  }
1697
1698  void remove(SUnit *SU) override {
1699    assert(!Queue.empty() && "Queue is empty!");
1700    assert(SU->NodeQueueId != 0 && "Not in queue!");
1701    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1702                                                 SU);
1703    if (I != std::prev(Queue.end()))
1704      std::swap(*I, Queue.back());
1705    Queue.pop_back();
1706    SU->NodeQueueId = 0;
1707  }
1708
1709  bool tracksRegPressure() const override { return TracksRegPressure; }
1710
1711  void dumpRegPressure() const;
1712
1713  bool HighRegPressure(const SUnit *SU) const;
1714
1715  bool MayReduceRegPressure(SUnit *SU) const;
1716
1717  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1718
1719  void scheduledNode(SUnit *SU) override;
1720
1721  void unscheduledNode(SUnit *SU) override;
1722
1723protected:
1724  bool canClobber(const SUnit *SU, const SUnit *Op);
1725  void AddPseudoTwoAddrDeps();
1726  void PrescheduleNodesWithMultipleUses();
1727  void CalculateSethiUllmanNumbers();
1728};
1729
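// popFromQueueImpl - Linear scan for the best (highest priority) unit in the
// queue; Picker(A, B) returns true when B is preferred over A. The chosen
// element is swapped to the back so removal is O(1) after the O(n) scan.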
1730template<class SF>
1731static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
1732  std::vector<SUnit *>::iterator Best = Q.begin();
1733  for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
1734         E = Q.end(); I != E; ++I)
1735    if (Picker(*Best, *I))
1736      Best = I;
1737  SUnit *V = *Best;
1738  if (Best != std::prev(Q.end()))
1739    std::swap(*Best, Q.back());
1740  Q.pop_back();
1741  return V;
1742}
1743
1744template<class SF>
1745SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
1746#ifndef NDEBUG
1747  if (DAG->StressSched) {
1748    reverse_sort<SF> RPicker(Picker);
1749    return popFromQueueImpl(Q, RPicker);
1750  }
1751#endif
1752  (void)DAG;
1753  return popFromQueueImpl(Q, Picker);
1754}
1755
1756template<class SF>
1757class RegReductionPriorityQueue : public RegReductionPQBase {
1758  SF Picker;
1759
1760public:
1761  RegReductionPriorityQueue(MachineFunction &mf,
1762                            bool tracksrp,
1763                            bool srcorder,
1764                            const TargetInstrInfo *tii,
1765                            const TargetRegisterInfo *tri,
1766                            const TargetLowering *tli)
1767    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
1768                         tii, tri, tli),
1769      Picker(this) {}
1770
1771  bool isBottomUp() const override { return SF::IsBottomUp; }
1772
1773  bool isReady(SUnit *U) const override {
1774    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1775  }
1776
1777  SUnit *pop() override {
1778    if (Queue.empty()) return NULL;
1779
1780    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
1781    V->NodeQueueId = 0;
1782    return V;
1783  }
1784
1785#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1786  void dump(ScheduleDAG *DAG) const {
1787    // Emulate pop() without clobbering NodeQueueIds.
1788    std::vector<SUnit*> DumpQueue = Queue;
1789    SF DumpPicker = Picker;
1790    while (!DumpQueue.empty()) {
1791      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
1792      dbgs() << "Height " << SU->getHeight() << ": ";
1793      SU->dump(DAG);
1794    }
1795  }
1796#endif
1797};
1798
1799typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1800BURegReductionPriorityQueue;
1801
1802typedef RegReductionPriorityQueue<src_ls_rr_sort>
1803SrcRegReductionPriorityQueue;
1804
1805typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1806HybridBURRPriorityQueue;
1807
1808typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1809ILPBURRPriorityQueue;
1810} // end anonymous namespace
1811
1812//===----------------------------------------------------------------------===//
1813//           Static Node Priority for Register Pressure Reduction
1814//===----------------------------------------------------------------------===//
1815
1816// Check for special nodes that bypass scheduling heuristics.
1817// Currently this pushes TokenFactor nodes down, but may be used for other
1818// pseudo-ops as well.
1819//
1820// Return -1 to schedule right above left, 1 for left above right.
1821// Return 0 if no bias exists.
1822static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1823  bool LSchedLow = left->isScheduleLow;
1824  bool RSchedLow = right->isScheduleLow;
1825  if (LSchedLow != RSchedLow)
1826    return LSchedLow < RSchedLow ? 1 : -1;
1827  return 0;
1828}
1829
1830/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number for a node.
1831/// A smaller number means a higher priority.
1832static unsigned
1833CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1834  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1835  if (SethiUllmanNumber != 0)
1836    return SethiUllmanNumber;
1837
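  // The recurrence below computes a Sethi-Ullman-style number: take the
  // maximum number over all data predecessors, then add one for each
  // additional predecessor that ties that maximum. For example, predecessor
  // numbers {2, 3, 3} yield 3 plus one extra, i.e. 4.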
1838  unsigned Extra = 0;
1839  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1840       I != E; ++I) {
1841    if (I->isCtrl()) continue;  // ignore chain preds
1842    SUnit *PredSU = I->getSUnit();
1843    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1844    if (PredSethiUllman > SethiUllmanNumber) {
1845      SethiUllmanNumber = PredSethiUllman;
1846      Extra = 0;
1847    } else if (PredSethiUllman == SethiUllmanNumber)
1848      ++Extra;
1849  }
1850
1851  SethiUllmanNumber += Extra;
1852
1853  if (SethiUllmanNumber == 0)
1854    SethiUllmanNumber = 1;
1855
1856  return SethiUllmanNumber;
1857}
1858
1859/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1860/// scheduling units.
1861void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1862  SethiUllmanNumbers.assign(SUnits->size(), 0);
1863
1864  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1865    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1866}
1867
1868void RegReductionPQBase::addNode(const SUnit *SU) {
1869  unsigned SUSize = SethiUllmanNumbers.size();
1870  if (SUnits->size() > SUSize)
1871    SethiUllmanNumbers.resize(SUSize*2, 0);
1872  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1873}
1874
1875void RegReductionPQBase::updateNode(const SUnit *SU) {
1876  SethiUllmanNumbers[SU->NodeNum] = 0;
1877  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1878}
1879
1880// Lower priority means schedule further down. For bottom-up scheduling, lower
1881// priority SUs are scheduled before higher priority SUs.
1882unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1883  assert(SU->NodeNum < SethiUllmanNumbers.size());
1884  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1885  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1886    // CopyToReg should be close to its uses to facilitate coalescing and
1887    // avoid spilling.
1888    return 0;
1889  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1890      Opc == TargetOpcode::SUBREG_TO_REG ||
1891      Opc == TargetOpcode::INSERT_SUBREG)
1892    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1893    // close to their uses to facilitate coalescing.
1894    return 0;
1895  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1896    // If SU does not have a register use, i.e. it doesn't produce a value
1897    // that would be consumed (e.g. store), then it terminates a chain of
1898    // computation.  Give it a large SethiUllman number so it will be
1899    // scheduled right before its predecessors so that it doesn't lengthen
1900    // their live ranges.
1901    return 0xffff;
1902  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1903    // If SU does not have a register def, schedule it close to its uses
1904    // because it does not lengthen any live ranges.
1905    return 0;
1906#if 1
1907  return SethiUllmanNumbers[SU->NodeNum];
1908#else
1909  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1910  if (SU->isCallOp) {
1911    // FIXME: This assumes all of the defs are used as call operands.
1912    int NP = (int)Priority - SU->getNode()->getNumValues();
1913    return (NP > 0) ? NP : 0;
1914  }
1915  return Priority;
1916#endif
1917}
1918
1919//===----------------------------------------------------------------------===//
1920//                     Register Pressure Tracking
1921//===----------------------------------------------------------------------===//
1922
1923void RegReductionPQBase::dumpRegPressure() const {
1924#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1925  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1926         E = TRI->regclass_end(); I != E; ++I) {
1927    const TargetRegisterClass *RC = *I;
1928    unsigned Id = RC->getID();
1929    unsigned RP = RegPressure[Id];
1930    if (!RP) continue;
1931    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1932          << '\n');
1933  }
1934#endif
1935}
1936
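// HighRegPressure - Return true if scheduling SU (which makes the registers
// defined by its unscheduled predecessors live) would push any register class
// to or beyond its pressure limit.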
1937bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1938  if (!TLI)
1939    return false;
1940
1941  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1942       I != E; ++I) {
1943    if (I->isCtrl())
1944      continue;
1945    SUnit *PredSU = I->getSUnit();
1946    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1947    // to cover the number of registers defined (they are all live).
1948    if (PredSU->NumRegDefsLeft == 0) {
1949      continue;
1950    }
1951    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1952         RegDefPos.IsValid(); RegDefPos.Advance()) {
1953      unsigned RCId, Cost;
1954      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
1955
1956      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1957        return true;
1958    }
1959  }
1960  return false;
1961}
1962
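// MayReduceRegPressure - Return true if SU defines a used value in a register
// class that is already at or over its pressure limit, so scheduling SU
// (bottom-up) may relieve that class.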
1963bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1964  const SDNode *N = SU->getNode();
1965
1966  if (!N->isMachineOpcode() || !SU->NumSuccs)
1967    return false;
1968
1969  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1970  for (unsigned i = 0; i != NumDefs; ++i) {
1971    MVT VT = N->getSimpleValueType(i);
1972    if (!N->hasAnyUseOfValue(i))
1973      continue;
1974    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1975    if (RegPressure[RCId] >= RegLimit[RCId])
1976      return true;
1977  }
1978  return false;
1979}
1980
1981// Compute the register pressure contribution of this instruction by counting
1982// up for uses that are not live and down for defs. Only count register classes
1983// that are already under high pressure. As a side effect, compute the number of
1984// uses of registers that are already live.
1985//
1986// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1987// so could probably be factored.
1988int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1989  LiveUses = 0;
1990  int PDiff = 0;
1991  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1992       I != E; ++I) {
1993    if (I->isCtrl())
1994      continue;
1995    SUnit *PredSU = I->getSUnit();
1996    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1997    // to cover the number of registers defined (they are all live).
1998    if (PredSU->NumRegDefsLeft == 0) {
1999      if (PredSU->getNode()->isMachineOpcode())
2000        ++LiveUses;
2001      continue;
2002    }
2003    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2004         RegDefPos.IsValid(); RegDefPos.Advance()) {
2005      MVT VT = RegDefPos.GetValue();
2006      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2007      if (RegPressure[RCId] >= RegLimit[RCId])
2008        ++PDiff;
2009    }
2010  }
2011  const SDNode *N = SU->getNode();
2012
2013  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
2014    return PDiff;
2015
2016  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2017  for (unsigned i = 0; i != NumDefs; ++i) {
2018    MVT VT = N->getSimpleValueType(i);
2019    if (!N->hasAnyUseOfValue(i))
2020      continue;
2021    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2022    if (RegPressure[RCId] >= RegLimit[RCId])
2023      --PDiff;
2024  }
2025  return PDiff;
2026}
2027
2028void RegReductionPQBase::scheduledNode(SUnit *SU) {
2029  if (!TracksRegPressure)
2030    return;
2031
2032  if (!SU->getNode())
2033    return;
2034
2035  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2036       I != E; ++I) {
2037    if (I->isCtrl())
2038      continue;
2039    SUnit *PredSU = I->getSUnit();
2040    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2041    // to cover the number of registers defined (they are all live).
2042    if (PredSU->NumRegDefsLeft == 0) {
2043      continue;
2044    }
2045    // FIXME: The ScheduleDAG currently loses information about which of a
2046    // node's values is consumed by each dependence. Consequently, if the node
2047    // defines multiple register classes, we don't know which to pressurize
2048    // here. Instead the following loop consumes the register defs in an
2049    // arbitrary order. At least it handles the common case of clustered loads
2050    // to the same class. For precise liveness, each SDep needs to indicate the
2051    // result number. But that tightly couples the ScheduleDAG with the
2052    // SelectionDAG making updates tricky. A simpler hack would be to attach a
2053    // value type or register class to SDep.
2054    //
2055    // The most important aspect of register tracking is balancing the increase
2056    // here with the reduction further below. Note that this SU may use multiple
2057    // defs in PredSU. They can't be determined here, but we've already
2058    // compensated by reducing NumRegDefsLeft in PredSU during
2059    // ScheduleDAGSDNodes::AddSchedEdges.
2060    --PredSU->NumRegDefsLeft;
2061    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2062    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2063         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2064      if (SkipRegDefs)
2065        continue;
2066
2067      unsigned RCId, Cost;
2068      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2069      RegPressure[RCId] += Cost;
2070      break;
2071    }
2072  }
2073
2074  // We should have this assert, but there may be dead SDNodes that never
2075  // materialize as SUnits, so they don't appear to generate liveness.
2076  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2077  int SkipRegDefs = (int)SU->NumRegDefsLeft;
2078  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2079       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2080    if (SkipRegDefs > 0)
2081      continue;
2082    unsigned RCId, Cost;
2083    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2084    if (RegPressure[RCId] < Cost) {
2085      // Register pressure tracking is imprecise. This can happen. But we try
2086      // hard not to let it happen because it likely results in poor scheduling.
2087      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
2088      RegPressure[RCId] = 0;
2089    }
2090    else {
2091      RegPressure[RCId] -= Cost;
2092    }
2093  }
2094  dumpRegPressure();
2095}
2096
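// unscheduledNode - Roughly undo the pressure bookkeeping that scheduledNode
// performed, for use when backtracking pops a node back off the schedule.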
2097void RegReductionPQBase::unscheduledNode(SUnit *SU) {
2098  if (!TracksRegPressure)
2099    return;
2100
2101  const SDNode *N = SU->getNode();
2102  if (!N) return;
2103
2104  if (!N->isMachineOpcode()) {
2105    if (N->getOpcode() != ISD::CopyToReg)
2106      return;
2107  } else {
2108    unsigned Opc = N->getMachineOpcode();
2109    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2110        Opc == TargetOpcode::INSERT_SUBREG ||
2111        Opc == TargetOpcode::SUBREG_TO_REG ||
2112        Opc == TargetOpcode::REG_SEQUENCE ||
2113        Opc == TargetOpcode::IMPLICIT_DEF)
2114      return;
2115  }
2116
2117  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2118       I != E; ++I) {
2119    if (I->isCtrl())
2120      continue;
2121    SUnit *PredSU = I->getSUnit();
2122    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2123    // counts data deps.
2124    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2125      continue;
2126    const SDNode *PN = PredSU->getNode();
2127    if (!PN->isMachineOpcode()) {
2128      if (PN->getOpcode() == ISD::CopyFromReg) {
2129        MVT VT = PN->getSimpleValueType(0);
2130        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2131        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2132      }
2133      continue;
2134    }
2135    unsigned POpc = PN->getMachineOpcode();
2136    if (POpc == TargetOpcode::IMPLICIT_DEF)
2137      continue;
2138    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2139        POpc == TargetOpcode::INSERT_SUBREG ||
2140        POpc == TargetOpcode::SUBREG_TO_REG) {
2141      MVT VT = PN->getSimpleValueType(0);
2142      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2143      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2144      continue;
2145    }
2146    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2147    for (unsigned i = 0; i != NumDefs; ++i) {
2148      MVT VT = PN->getSimpleValueType(i);
2149      if (!PN->hasAnyUseOfValue(i))
2150        continue;
2151      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2152      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2153        // Register pressure tracking is imprecise. This can happen.
2154        RegPressure[RCId] = 0;
2155      else
2156        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2157    }
2158  }
2159
2160  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2161  // may transfer data dependencies to CopyToReg.
2162  if (SU->NumSuccs && N->isMachineOpcode()) {
2163    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2164    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2165      MVT VT = N->getSimpleValueType(i);
2166      if (VT == MVT::Glue || VT == MVT::Other)
2167        continue;
2168      if (!N->hasAnyUseOfValue(i))
2169        continue;
2170      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2171      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2172    }
2173  }
2174
2175  dumpRegPressure();
2176}
2177
2178//===----------------------------------------------------------------------===//
2179//           Dynamic Node Priority for Register Pressure Reduction
2180//===----------------------------------------------------------------------===//
2181
2182/// closestSucc - Returns the scheduled cycle of the successor which is
2183/// closest to the current cycle.
2184static unsigned closestSucc(const SUnit *SU) {
2185  unsigned MaxHeight = 0;
2186  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2187       I != E; ++I) {
2188    if (I->isCtrl()) continue;  // ignore chain succs
2189    unsigned Height = I->getSUnit()->getHeight();
2190    // If there are a bunch of CopyToRegs stacked up, they should be considered
2191    // to be at the same position.
2192    if (I->getSUnit()->getNode() &&
2193        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2194      Height = closestSucc(I->getSUnit())+1;
2195    if (Height > MaxHeight)
2196      MaxHeight = Height;
2197  }
2198  return MaxHeight;
2199}
2200
2201/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2202/// for scratch registers, i.e. number of data dependencies.
2203static unsigned calcMaxScratches(const SUnit *SU) {
2204  unsigned Scratches = 0;
2205  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2206       I != E; ++I) {
2207    if (I->isCtrl()) continue;  // ignore chain preds
2208    Scratches++;
2209  }
2210  return Scratches;
2211}
2212
2213/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2214/// CopyFromReg from a virtual register.
2215static bool hasOnlyLiveInOpers(const SUnit *SU) {
2216  bool RetVal = false;
2217  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2218       I != E; ++I) {
2219    if (I->isCtrl()) continue;
2220    const SUnit *PredSU = I->getSUnit();
2221    if (PredSU->getNode() &&
2222        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2223      unsigned Reg =
2224        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2225      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2226        RetVal = true;
2227        continue;
2228      }
2229    }
2230    return false;
2231  }
2232  return RetVal;
2233}
2234
2235/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2236/// CopyToReg to a virtual register. This SU def is probably a liveout and
2237/// it has no other use. It should be scheduled closer to the terminator.
2238static bool hasOnlyLiveOutUses(const SUnit *SU) {
2239  bool RetVal = false;
2240  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2241       I != E; ++I) {
2242    if (I->isCtrl()) continue;
2243    const SUnit *SuccSU = I->getSUnit();
2244    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2245      unsigned Reg =
2246        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2247      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2248        RetVal = true;
2249        continue;
2250      }
2251    }
2252    return false;
2253  }
2254  return RetVal;
2255}
2256
2257// Set isVRegCycle for a node with only live-in operands and live-out uses.
2258// Also set isVRegCycle for its CopyFromReg operands.
2259//
2260// This is only relevant for single-block loops, in which case the VRegCycle
2261// node is likely an induction variable in which the operand and target virtual
2262// registers should be coalesced (e.g. pre/post increment values). Setting the
2263// isVRegCycle flag helps the scheduler prioritize other uses of the same
2264// CopyFromReg so that this node becomes the virtual register "kill". This
2265// avoids interference between the values live in and out of the block and
2266// eliminates a copy inside the loop.
2267static void initVRegCycle(SUnit *SU) {
2268  if (DisableSchedVRegCycle)
2269    return;
2270
2271  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2272    return;
2273
2274  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2275
2276  SU->isVRegCycle = true;
2277
2278  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2279       I != E; ++I) {
2280    if (I->isCtrl()) continue;
2281    I->getSUnit()->isVRegCycle = true;
2282  }
2283}
2284
2285// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2286// CopyFromReg operands. We should no longer penalize other uses of this VReg.
2287static void resetVRegCycle(SUnit *SU) {
2288  if (!SU->isVRegCycle)
2289    return;
2290
2291  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2292       I != E; ++I) {
2293    if (I->isCtrl()) continue;  // ignore chain preds
2294    SUnit *PredSU = I->getSUnit();
2295    if (PredSU->isVRegCycle) {
2296      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2297             "VRegCycle def must be CopyFromReg");
2298      PredSU->isVRegCycle = false;
2299    }
2300  }
2301}
2302
2303// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2304// means a node that defines the VRegCycle has not been scheduled yet.
2305static bool hasVRegCycleUse(const SUnit *SU) {
2306  // If this SU also defines the VReg, don't hoist it as a "use".
2307  if (SU->isVRegCycle)
2308    return false;
2309
2310  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2311       I != E; ++I) {
2312    if (I->isCtrl()) continue;  // ignore chain preds
2313    if (I->getSUnit()->isVRegCycle &&
2314        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2315      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
2316      return true;
2317    }
2318  }
2319  return false;
2320}
2321
2322// Check for either a dependence (latency) or resource (hazard) stall.
2323//
2324// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2325static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2326  if ((int)SPQ->getCurCycle() < Height) return true;
2327  if (SPQ->getHazardRec()->getHazardType(SU, 0)
2328      != ScheduleHazardRecognizer::NoHazard)
2329    return true;
2330  return false;
2331}
2332
2333// Return -1 if left has higher priority, 1 if right has higher priority.
2334// Return 0 if latency-based priority is equivalent.
2335static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2336                            RegReductionPQBase *SPQ) {
2337  // Scheduling an instruction that uses a VReg whose postincrement has not yet
2338  // been scheduled will induce a copy. Model this as an extra cycle of latency.
2339  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2340  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2341  int LHeight = (int)left->getHeight() + LPenalty;
2342  int RHeight = (int)right->getHeight() + RPenalty;
2343
2344  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2345    BUHasStall(left, LHeight, SPQ);
2346  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2347    BUHasStall(right, RHeight, SPQ);
2348
2349  // If scheduling one of the nodes will cause a pipeline stall, prefer the
2350  // other one. If scheduling both of them will cause stalls, sort them
2351  // according to their height.
2352  if (LStall) {
2353    if (!RStall)
2354      return 1;
2355    if (LHeight != RHeight)
2356      return LHeight > RHeight ? 1 : -1;
2357  } else if (RStall)
2358    return -1;
2359
2360  // If either node is scheduling for latency, sort them by height/depth
2361  // and latency.
2362  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2363                     right->SchedulingPref == Sched::ILP)) {
2364    // If neither instruction stalls (!LStall && !RStall) and the
2365    // HazardRecognizer is enabled, grouping instructions by cycle, then height
2366    // is already covered, so only depth matters. We also reach this point if
2367    // both stall but have the same height.
2368    if (!SPQ->getHazardRec()->isEnabled()) {
2369      if (LHeight != RHeight)
2370        return LHeight > RHeight ? 1 : -1;
2371    }
2372    int LDepth = left->getDepth() - LPenalty;
2373    int RDepth = right->getDepth() - RPenalty;
2374    if (LDepth != RDepth) {
2375      DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
2376            << ") depth " << LDepth << " vs SU (" << right->NodeNum
2377            << ") depth " << RDepth << "\n");
2378      return LDepth < RDepth ? 1 : -1;
2379    }
2380    if (left->Latency != right->Latency)
2381      return left->Latency > right->Latency ? 1 : -1;
2382  }
2383  return 0;
2384}
2385
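// BURRSort - Shared bottom-up tie-breaking cascade, applied in order: physreg
// defs (kept close to their uses), Sethi-Ullman priority (adjusted so call
// operands are not hoisted above calls), source order between calls, distance
// to the closest successor, scratch register count, latency (unless a call is
// involved), and finally queue order as the last resort.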
2386static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2387  // Schedule physical register definitions close to their use. This is
2388  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2389  // long as shortening physreg live ranges is generally good, we can defer
2390  // creating a subtarget hook.
2391  if (!DisableSchedPhysRegJoin) {
2392    bool LHasPhysReg = left->hasPhysRegDefs;
2393    bool RHasPhysReg = right->hasPhysRegDefs;
2394    if (LHasPhysReg != RHasPhysReg) {
2395      #ifndef NDEBUG
2396      static const char *const PhysRegMsg[] = { " has no physreg",
2397                                                " defines a physreg" };
2398      #endif
2399      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
2400            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2401            << PhysRegMsg[RHasPhysReg] << "\n");
2402      return LHasPhysReg < RHasPhysReg;
2403    }
2404  }
2405
2406  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2407  unsigned LPriority = SPQ->getNodePriority(left);
2408  unsigned RPriority = SPQ->getNodePriority(right);
2409
2410  // Be really careful about hoisting call operands above previous calls.
2411  // Only allow it if it would reduce register pressure.
2412  if (left->isCall && right->isCallOp) {
2413    unsigned RNumVals = right->getNode()->getNumValues();
2414    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2415  }
2416  if (right->isCall && left->isCallOp) {
2417    unsigned LNumVals = left->getNode()->getNumValues();
2418    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2419  }
2420
2421  if (LPriority != RPriority)
2422    return LPriority > RPriority;
2423
2424  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2425  // the same, then keep source order.
2426  if (left->isCall || right->isCall) {
2427    unsigned LOrder = SPQ->getNodeOrdering(left);
2428    unsigned ROrder = SPQ->getNodeOrdering(right);
2429
2430    // Prefer the node with the lower non-zero order number; it should end up
2431    // earlier in the final schedule. An order number of zero means unknown.
2432    if ((LOrder || ROrder) && LOrder != ROrder)
2433      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2434  }
2435
2436  // Try to schedule def + use closer together when Sethi-Ullman numbers match.
2437  // e.g.
2438  // t1 = op t2, c1
2439  // t3 = op t4, c2
2440  //
2441  // and the following instructions are both ready.
2442  // t2 = op c3
2443  // t4 = op c4
2444  //
2445  // Then schedule t2 = op first.
2446  // i.e.
2447  // t4 = op c4
2448  // t2 = op c3
2449  // t1 = op t2, c1
2450  // t3 = op t4, c2
2451  //
2452  // This creates more short live intervals.
2453  unsigned LDist = closestSucc(left);
2454  unsigned RDist = closestSucc(right);
2455  if (LDist != RDist)
2456    return LDist < RDist;
2457
2458  // How many registers become live when the node is scheduled.
2459  unsigned LScratch = calcMaxScratches(left);
2460  unsigned RScratch = calcMaxScratches(right);
2461  if (LScratch != RScratch)
2462    return LScratch > RScratch;
2463
2464  // Comparing latency against a call makes little sense unless the node
2465  // is register pressure-neutral.
2466  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2467    return (left->NodeQueueId > right->NodeQueueId);
2468
2469  // Do not compare latencies when one or both of the nodes are calls.
2470  if (!DisableSchedCycles &&
2471      !(left->isCall || right->isCall)) {
2472    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2473    if (result != 0)
2474      return result > 0;
2475  }
2476  else {
2477    if (left->getHeight() != right->getHeight())
2478      return left->getHeight() > right->getHeight();
2479
2480    if (left->getDepth() != right->getDepth())
2481      return left->getDepth() < right->getDepth();
2482  }
2483
2484  assert(left->NodeQueueId && right->NodeQueueId &&
2485         "NodeQueueId cannot be zero");
2486  return (left->NodeQueueId > right->NodeQueueId);
2487}
2488
2489// Bottom up
2490bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2491  if (int res = checkSpecialNodes(left, right))
2492    return res > 0;
2493
2494  return BURRSort(left, right, SPQ);
2495}
2496
2497// Source order, otherwise bottom up.
2498bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2499  if (int res = checkSpecialNodes(left, right))
2500    return res > 0;
2501
2502  unsigned LOrder = SPQ->getNodeOrdering(left);
2503  unsigned ROrder = SPQ->getNodeOrdering(right);
2504
2505  // Prefer the node with the lower non-zero order number; it should end up
2506  // earlier in the final schedule. An order number of zero means unknown.
2507  if ((LOrder || ROrder) && LOrder != ROrder)
2508    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2509
2510  return BURRSort(left, right, SPQ);
2511}
2512
2513// If the time between now and when the instruction will be ready can cover
2514// the spill code, then avoid adding it to the ready queue. This gives long
2515// stalls highest priority and allows hoisting across calls. It should also
2516// speed up processing the available queue.
2517bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2518  static const unsigned ReadyDelay = 3;
2519
2520  if (SPQ->MayReduceRegPressure(SU)) return true;
2521
2522  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2523
2524  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2525      != ScheduleHazardRecognizer::NoHazard)
2526    return false;
2527
2528  return true;
2529}
2530
2531// Return true if right should be scheduled with higher priority than left.
2532bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2533  if (int res = checkSpecialNodes(left, right))
2534    return res > 0;
2535
2536  if (left->isCall || right->isCall)
2537    // No way to compute latency of calls.
2538    return BURRSort(left, right, SPQ);
2539
2540  bool LHigh = SPQ->HighRegPressure(left);
2541  bool RHigh = SPQ->HighRegPressure(right);
2542  // Avoid causing spills. If register pressure is high, schedule for
2543  // register pressure reduction.
2544  if (LHigh && !RHigh) {
2545    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
2546          << right->NodeNum << ")\n");
2547    return true;
2548  }
2549  else if (!LHigh && RHigh) {
2550    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
2551          << left->NodeNum << ")\n");
2552    return false;
2553  }
2554  if (!LHigh && !RHigh) {
2555    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2556    if (result != 0)
2557      return result > 0;
2558  }
2559  return BURRSort(left, right, SPQ);
2560}
2561
2562// Schedule as many instructions in each cycle as possible, so don't make an
2563// instruction available unless it is ready in the current cycle.
2564bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2565  if (SU->getHeight() > CurCycle) return false;
2566
2567  if (SPQ->getHazardRec()->getHazardType(SU, 0)
2568      != ScheduleHazardRecognizer::NoHazard)
2569    return false;
2570
2571  return true;
2572}
2573
2574static bool canEnableCoalescing(SUnit *SU) {
2575  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2576  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2577    // CopyToReg should be close to its uses to facilitate coalescing and
2578    // avoid spilling.
2579    return true;
2580
2581  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2582      Opc == TargetOpcode::SUBREG_TO_REG ||
2583      Opc == TargetOpcode::INSERT_SUBREG)
2584    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2585    // close to their uses to facilitate coalescing.
2586    return true;
2587
2588  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2589    // If SU does not have a register def, schedule it close to its uses
2590    // because it does not lengthen any live ranges.
2591    return true;
2592
2593  return false;
2594}
2595
2596// list-ilp is currently an experimental scheduler that allows various
2597// heuristics to be enabled prior to the normal register reduction logic.
2598bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2599  if (int res = checkSpecialNodes(left, right))
2600    return res > 0;
2601
2602  if (left->isCall || right->isCall)
2603    // No way to compute latency of calls.
2604    return BURRSort(left, right, SPQ);
2605
2606  unsigned LLiveUses = 0, RLiveUses = 0;
2607  int LPDiff = 0, RPDiff = 0;
2608  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2609    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2610    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2611  }
2612  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2613    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2614          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2615    return LPDiff > RPDiff;
2616  }
2617
2618  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2619    bool LReduce = canEnableCoalescing(left);
2620    bool RReduce = canEnableCoalescing(right);
2621    if (LReduce && !RReduce) return false;
2622    if (RReduce && !LReduce) return true;
2623  }
2624
2625  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2626    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2627          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2628    return LLiveUses < RLiveUses;
2629  }
2630
2631  if (!DisableSchedStalls) {
2632    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2633    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2634    if (LStall != RStall)
2635      return left->getHeight() > right->getHeight();
2636  }
2637
2638  if (!DisableSchedCriticalPath) {
2639    int spread = (int)left->getDepth() - (int)right->getDepth();
2640    if (std::abs(spread) > MaxReorderWindow) {
2641      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2642            << left->getDepth() << " != SU(" << right->NodeNum << "): "
2643            << right->getDepth() << "\n");
2644      return left->getDepth() < right->getDepth();
2645    }
2646  }
2647
2648  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2649    int spread = (int)left->getHeight() - (int)right->getHeight();
2650    if (std::abs(spread) > MaxReorderWindow)
2651      return left->getHeight() > right->getHeight();
2652  }
2653
2654  return BURRSort(left, right, SPQ);
2655}
2656
2657void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2658  SUnits = &sunits;
2659  // Add pseudo dependency edges for two-address nodes.
2660  if (!Disable2AddrHack)
2661    AddPseudoTwoAddrDeps();
2662  // Reroute edges to nodes with multiple uses.
2663  if (!TracksRegPressure && !SrcOrder)
2664    PrescheduleNodesWithMultipleUses();
2665  // Calculate node priorities.
2666  CalculateSethiUllmanNumbers();
2667
2668  // For single block loops, mark nodes that look like canonical IV increments.
2669  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2670    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2671      initVRegCycle(&sunits[i]);
2672    }
2673  }
2674}
2675
2676//===----------------------------------------------------------------------===//
2677//                    Preschedule for Register Pressure
2678//===----------------------------------------------------------------------===//
2679
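// canClobber - Return true if SU is a two-address instruction with an operand
// tied to a def, and that operand is produced by Op.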
2680bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2681  if (SU->isTwoAddress) {
2682    unsigned Opc = SU->getNode()->getMachineOpcode();
2683    const MCInstrDesc &MCID = TII->get(Opc);
2684    unsigned NumRes = MCID.getNumDefs();
2685    unsigned NumOps = MCID.getNumOperands() - NumRes;
2686    for (unsigned i = 0; i != NumOps; ++i) {
2687      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2688        SDNode *DU = SU->getNode()->getOperand(i).getNode();
2689        if (DU->getNodeId() != -1 &&
2690            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2691          return true;
2692      }
2693    }
2694  }
2695  return false;
2696}
2697
2698/// canClobberReachingPhysRegUse - True if SU would clobber one of its
2699/// successors' explicit physregs whose definition can reach DepSU.
2700/// i.e. DepSU should not be scheduled above SU.
2701static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2702                                         ScheduleDAGRRList *scheduleDAG,
2703                                         const TargetInstrInfo *TII,
2704                                         const TargetRegisterInfo *TRI) {
2705  const uint16_t *ImpDefs
2706    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2707  const uint32_t *RegMask = getNodeRegMask(SU->getNode());
2708  if (!ImpDefs && !RegMask)
2709    return false;
2710
2711  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
2712       SI != SE; ++SI) {
2713    SUnit *SuccSU = SI->getSUnit();
2714    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
2715           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
2716      if (!PI->isAssignedRegDep())
2717        continue;
2718
2719      if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
2720          scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2721        return true;
2722
2723      if (ImpDefs)
2724        for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
2725          // Return true if SU clobbers this physical register use and the
2726          // definition of the register is reachable from DepSU. IsReachable
2727          // a topological forward sort of the DAG (following the successors).
2728          if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
2729              scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2730            return true;
2731    }
2732  }
2733  return false;
2734}
2735
2736/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2737/// physical register defs.
2738static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2739                                  const TargetInstrInfo *TII,
2740                                  const TargetRegisterInfo *TRI) {
2741  SDNode *N = SuccSU->getNode();
2742  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2743  const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2744  assert(ImpDefs && "Caller should check hasPhysRegDefs");
2745  for (const SDNode *SUNode = SU->getNode(); SUNode;
2746       SUNode = SUNode->getGluedNode()) {
2747    if (!SUNode->isMachineOpcode())
2748      continue;
2749    const uint16_t *SUImpDefs =
2750      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2751    const uint32_t *SURegMask = getNodeRegMask(SUNode);
2752    if (!SUImpDefs && !SURegMask)
2753      continue;
2754    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2755      EVT VT = N->getValueType(i);
2756      if (VT == MVT::Glue || VT == MVT::Other)
2757        continue;
2758      if (!N->hasAnyUseOfValue(i))
2759        continue;
2760      unsigned Reg = ImpDefs[i - NumDefs];
2761      if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
2762        return true;
2763      if (!SUImpDefs)
2764        continue;
2765      for (; *SUImpDefs; ++SUImpDefs) {
2766        unsigned SUReg = *SUImpDefs;
2767        if (TRI->regsOverlap(Reg, SUReg))
2768          return true;
2769      }
2770    }
2771  }
2772  return false;
2773}
2774
2775/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2776/// are not handled well by the general register pressure reduction
2777/// heuristics. When presented with code like this:
2778///
2779///      N
2780///    / |
2781///   /  |
2782///  U  store
2783///  |
2784/// ...
2785///
2786/// the heuristics tend to push the store up, but since the
2787/// operand of the store has another use (U), this would increase
2788/// the length of that other use (the U->N edge).
2789///
2790/// This function transforms code like the above to route U's
2791/// dependence through the store when possible, like this:
2792///
2793///      N
2794///      ||
2795///      ||
2796///     store
2797///       |
2798///       U
2799///       |
2800///      ...
2801///
2802/// This results in the store being scheduled immediately
2803/// after N, which shortens the U->N live range, reducing
2804/// register pressure.
2805///
2806void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2807  // Visit all the nodes in topological order, working top-down.
2808  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2809    SUnit *SU = &(*SUnits)[i];
2810    // For now, only look at nodes with no data successors, such as stores.
2811    // These are especially important, due to the heuristics in
2812    // getNodePriority for nodes with no data successors.
2813    if (SU->NumSuccs != 0)
2814      continue;
2815    // For now, only look at nodes with exactly one data predecessor.
2816    if (SU->NumPreds != 1)
2817      continue;
2818    // Avoid prescheduling copies to virtual registers, which don't behave
2819    // like other nodes from the perspective of scheduling heuristics.
2820    if (SDNode *N = SU->getNode())
2821      if (N->getOpcode() == ISD::CopyToReg &&
2822          TargetRegisterInfo::isVirtualRegister
2823            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2824        continue;
2825
2826    // Locate the single data predecessor.
2827    SUnit *PredSU = 0;
2828    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2829         EE = SU->Preds.end(); II != EE; ++II)
2830      if (!II->isCtrl()) {
2831        PredSU = II->getSUnit();
2832        break;
2833      }
2834    assert(PredSU);
2835
2836    // Don't rewrite edges that carry physregs, because that requires additional
2837    // support infrastructure.
2838    if (PredSU->hasPhysRegDefs)
2839      continue;
2840    // Short-circuit the case where SU is PredSU's only data successor.
2841    if (PredSU->NumSuccs == 1)
2842      continue;
2843    // Avoid prescheduling to copies from virtual registers, which don't behave
2844    // like other nodes from the perspective of scheduling heuristics.
2845    if (SDNode *N = SU->getNode())
2846      if (N->getOpcode() == ISD::CopyFromReg &&
2847          TargetRegisterInfo::isVirtualRegister
2848            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2849        continue;
2850
2851    // Perform checks on the successors of PredSU.
2852    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2853         EE = PredSU->Succs.end(); II != EE; ++II) {
2854      SUnit *PredSuccSU = II->getSUnit();
2855      if (PredSuccSU == SU) continue;
2856      // If PredSU has another successor with no data successors, for
2857      // now don't attempt to choose either over the other.
2858      if (PredSuccSU->NumSuccs == 0)
2859        goto outer_loop_continue;
2860      // Don't break physical register dependencies.
2861      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2862        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2863          goto outer_loop_continue;
2864      // Don't introduce graph cycles.
2865      if (scheduleDAG->IsReachable(SU, PredSuccSU))
2866        goto outer_loop_continue;
2867    }
2868
2869    // Ok, the transformation is safe and the heuristics suggest it is
2870    // profitable. Update the graph.
2871    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
2872                 << " next to PredSU #" << PredSU->NodeNum
2873                 << " to guide scheduling in the presence of multiple uses\n");
2874    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2875      SDep Edge = PredSU->Succs[i];
2876      assert(!Edge.isAssignedRegDep());
2877      SUnit *SuccSU = Edge.getSUnit();
2878      if (SuccSU != SU) {
2879        Edge.setSUnit(PredSU);
2880        scheduleDAG->RemovePred(SuccSU, Edge);
2881        scheduleDAG->AddPred(SU, Edge);
2882        Edge.setSUnit(SU);
2883        scheduleDAG->AddPred(SuccSU, Edge);
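        // RemovePred erased the old edge from PredSU->Succs (the replacement
        // was appended at the end), so step back to revisit this index.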
2884        --i;
2885      }
2886    }
2887  outer_loop_continue:;
2888  }
2889}
2890
2891/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2892/// it as a def&use operand, add a pseudo control edge from it to the other
2893/// node (if it won't create a cycle) so the two-address one will be scheduled
2894/// first (lower in the schedule). If both nodes are two-address, favor the
2895/// one that has a CopyToReg use (more likely to be a loop induction update).
2896/// If both are two-address, but one is commutable while the other is not
2897/// commutable, favor the one that's not commutable.
2898void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2899  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2900    SUnit *SU = &(*SUnits)[i];
2901    if (!SU->isTwoAddress)
2902      continue;
2903
2904    SDNode *Node = SU->getNode();
2905    if (!Node || !Node->isMachineOpcode() || Node->getGluedNode())
2906      continue;
2907
    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
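    // Scan the use operands (the defs occupy the first NumRes operand
    // slots) for one with a TIED_TO constraint, i.e. an operand that must
    // be assigned the same register as one of the defs;
    // getOperandConstraint returns -1 for operands without the constraint.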
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = Node->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative: ignore SuccSU if its height is more than one
        // level below SU's, i.e. the nodes aren't at roughly the same
        // height in the DAG.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
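        // For example, given a chain
        //   DUSU -> SuccSU (COPY_TO_REGCLASS) -> RealUse
        // the walk above lands on RealUse, so the artificial edge ends up
        // constraining the real user rather than a copy that the register
        // coalescer may later remove.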
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
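        // Add the edge only when it is safe (no physical register use can
        // be clobbered by the reordering, and no cycle is created) and the
        // tie-breakers described in the function comment favor SU: SuccSU
        // is not itself a clobbering use of the shared operand, or SU
        // feeds live-out uses while SuccSU does not, or SU is the
        // non-commutable one.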
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                       << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

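// These factories are normally reached through llc's -pre-RA-sched option,
// e.g. "llc -pre-RA-sched=list-burr". In the calls below, the two boolean
// arguments to each priority queue select register-pressure tracking and
// source-order preference respectively (a reading of the
// RegReductionPriorityQueue constructor defined earlier in this file), and
// the boolean passed to ScheduleDAGRRList requests latency information;
// only the hybrid and ILP schedulers enable pressure tracking and latency.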
llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}