MachineScheduler.cpp revision ffd2526fa4e2d78564694b4797b96236c9ba9d85
1//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// MachineScheduler schedules machine instructions after phi elimination. It
11// preserves LiveIntervals so it can be invoked before register allocation.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "misched"
16
17#include "llvm/CodeGen/LiveIntervalAnalysis.h"
18#include "llvm/CodeGen/MachineScheduler.h"
19#include "llvm/CodeGen/Passes.h"
20#include "llvm/CodeGen/RegisterClassInfo.h"
21#include "llvm/CodeGen/RegisterPressure.h"
22#include "llvm/CodeGen/ScheduleDAGInstrs.h"
23#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
24#include "llvm/Target/TargetInstrInfo.h"
25#include "llvm/MC/MCInstrItineraries.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Support/CommandLine.h"
28#include "llvm/Support/Debug.h"
29#include "llvm/Support/ErrorHandling.h"
30#include "llvm/Support/raw_ostream.h"
31#include "llvm/ADT/OwningPtr.h"
32#include "llvm/ADT/PriorityQueue.h"
33
34#include <queue>
35
36using namespace llvm;
37
38static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
39                                  cl::desc("Force top-down list scheduling"));
40static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
41                                  cl::desc("Force bottom-up list scheduling"));
42
43#ifndef NDEBUG
44static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
45  cl::desc("Pop up a window to show MISched dags after they are processed"));
46
47static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
48  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
49#else
50static bool ViewMISchedDAGs = false;
51#endif // NDEBUG
52
53//===----------------------------------------------------------------------===//
54// Machine Instruction Scheduling Pass and Registry
55//===----------------------------------------------------------------------===//
56
57MachineSchedContext::MachineSchedContext():
58    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
59  RegClassInfo = new RegisterClassInfo();
60}
61
62MachineSchedContext::~MachineSchedContext() {
63  delete RegClassInfo;
64}
65
66namespace {
67/// MachineScheduler runs after coalescing and before register allocation.
68class MachineScheduler : public MachineSchedContext,
69                         public MachineFunctionPass {
70public:
71  MachineScheduler();
72
73  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
74
75  virtual void releaseMemory() {}
76
77  virtual bool runOnMachineFunction(MachineFunction&);
78
79  virtual void print(raw_ostream &O, const Module* = 0) const;
80
81  static char ID; // Class identification, replacement for typeinfo
82};
83} // namespace
84
85char MachineScheduler::ID = 0;
86
87char &llvm::MachineSchedulerID = MachineScheduler::ID;
88
89INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
90                      "Machine Instruction Scheduler", false, false)
91INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
92INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
93INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
94INITIALIZE_PASS_END(MachineScheduler, "misched",
95                    "Machine Instruction Scheduler", false, false)
96
97MachineScheduler::MachineScheduler()
98: MachineFunctionPass(ID) {
99  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
100}
101
102void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
103  AU.setPreservesCFG();
104  AU.addRequiredID(MachineDominatorsID);
105  AU.addRequired<MachineLoopInfo>();
106  AU.addRequired<AliasAnalysis>();
107  AU.addRequired<TargetPassConfig>();
108  AU.addRequired<SlotIndexes>();
109  AU.addPreserved<SlotIndexes>();
110  AU.addRequired<LiveIntervals>();
111  AU.addPreserved<LiveIntervals>();
112  MachineFunctionPass::getAnalysisUsage(AU);
113}
114
115MachinePassRegistry MachineSchedRegistry::Registry;
116
117/// A dummy default scheduler factory indicates whether the scheduler
118/// is overridden on the command line.
119static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
120  return 0;
121}
122
123/// MachineSchedOpt allows command line selection of the scheduler.
124static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
125               RegisterPassParser<MachineSchedRegistry> >
126MachineSchedOpt("misched",
127                cl::init(&useDefaultMachineSched), cl::Hidden,
128                cl::desc("Machine instruction scheduler to use"));
129
130static MachineSchedRegistry
131DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
132                     useDefaultMachineSched);
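
// As an illustrative sketch (not part of this revision), a target or plugin
// could register its own scheduler the same way, mirroring the
// ConvergingSchedRegistry entry near the end of this file. The factory and
// command line key below are hypothetical:
//
//   static ScheduleDAGInstrs *createMyCustomSched(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, new ConvergingScheduler());
//   }
//   static MachineSchedRegistry
//   MyCustomSchedRegistry("my-custom", "Example custom scheduler.",
//                         createMyCustomSched);
//
// The -misched=<key> option defined above then selects it by its key.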
133
134/// Forward declare the standard machine scheduler. This will be used as the
135/// default scheduler if the target does not set a default.
136static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
137
138
139/// Decrement this iterator until reaching the top or a non-debug instr.
140static MachineBasicBlock::iterator
141priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
142  assert(I != Beg && "reached the top of the region, cannot decrement");
143  while (--I != Beg) {
144    if (!I->isDebugValue())
145      break;
146  }
147  return I;
148}
149
150/// If this iterator is a debug value, increment until reaching the End or a
151/// non-debug instruction.
152static MachineBasicBlock::iterator
153nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
154  for (; I != End; ++I) {
155    if (!I->isDebugValue())
156      break;
157  }
158  return I;
159}
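
// For example, the scheduling loop below always advances CurrentTop with
// nextIfDebug() so that a DBG_VALUE never becomes the insertion point for
// moved instructions.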
160
161/// Top-level MachineScheduler pass driver.
162///
163/// Visit blocks in function order. Divide each block into scheduling regions
164/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
165/// consistent with the DAG builder, which traverses the interior of the
166/// scheduling regions bottom-up.
167///
168/// This design avoids exposing scheduling boundaries to the DAG builder,
169/// simplifying the DAG builder's support for "special" target instructions.
170/// At the same time the design allows target schedulers to operate across
171/// scheduling boundaries, for example to bundle the boundary instructions
172/// without reordering them. This creates complexity, because the target
173/// scheduler must update the RegionBegin and RegionEnd positions cached by
174/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
175/// design would be to split blocks at scheduling boundaries, but LLVM has a
176/// general bias against block splitting purely for implementation simplicity.
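///
/// For illustration only (assuming the target treats calls, like terminators,
/// as scheduling boundaries), a block of the form
///
///   %a = ...
///   %b = ...
///   CALL          <- boundary
///   %c = ...
///   %d = ...
///   BR ...        <- boundary
///
/// is divided into two regions and visited bottom-up: first [%c, BR), then
/// [%a, CALL). The boundary instructions themselves are never reordered.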
177bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
178  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
179
180  // Initialize the context of the pass.
181  MF = &mf;
182  MLI = &getAnalysis<MachineLoopInfo>();
183  MDT = &getAnalysis<MachineDominatorTree>();
184  PassConfig = &getAnalysis<TargetPassConfig>();
185  AA = &getAnalysis<AliasAnalysis>();
186
187  LIS = &getAnalysis<LiveIntervals>();
188  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
189
190  RegClassInfo->runOnMachineFunction(*MF);
191
192  // Select the scheduler, or set the default.
193  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
194  if (Ctor == useDefaultMachineSched) {
195    // Get the default scheduler set by the target.
196    Ctor = MachineSchedRegistry::getDefault();
197    if (!Ctor) {
198      Ctor = createConvergingSched;
199      MachineSchedRegistry::setDefault(Ctor);
200    }
201  }
202  // Instantiate the selected scheduler.
203  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
204
205  // Visit all machine basic blocks.
206  //
207  // TODO: Visit blocks in global postorder or postorder within the bottom-up
208  // loop tree. Then we can optionally compute global RegPressure.
209  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
210       MBB != MBBEnd; ++MBB) {
211
212    Scheduler->startBlock(MBB);
213
214    // Break the block into scheduling regions [I, RegionEnd), and schedule each
215    // region as soon as it is discovered. RegionEnd points to the scheduling
216    // boundary at the bottom of the region. The DAG does not include RegionEnd,
217    // but the region does (i.e. the next RegionEnd is above the previous
218    // RegionBegin). If the current block has no terminator then RegionEnd ==
219    // MBB->end() for the bottom region.
220    //
221    // The Scheduler may insert instructions during either schedule() or
222    // exitRegion(), even for empty regions. So the local iterators 'I' and
223    // 'RegionEnd' are invalid across these calls.
224    unsigned RemainingCount = MBB->size();
225    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
226        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
227
228      // Avoid decrementing RegionEnd for blocks with no terminator.
229      if (RegionEnd != MBB->end()
230          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
231        --RegionEnd;
232        // Count the boundary instruction.
233        --RemainingCount;
234      }
235
236      // The next region starts above the previous region. Look backward in the
237      // instruction stream until we find the nearest boundary.
238      MachineBasicBlock::iterator I = RegionEnd;
239      for (; I != MBB->begin(); --I, --RemainingCount) {
240        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
241          break;
242      }
243      // Notify the scheduler of the region, even if we may skip scheduling
244      // it. Perhaps it still needs to be bundled.
245      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);
246
247      // Skip empty scheduling regions (0 or 1 schedulable instructions).
248      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
249        // Close the current region. Bundle the terminator if needed.
250        // This invalidates 'RegionEnd' and 'I'.
251        Scheduler->exitRegion();
252        continue;
253      }
254      DEBUG(dbgs() << "********** MI Scheduling **********\n");
255      DEBUG(dbgs() << MF->getName()
256            << ":BB#" << MBB->getNumber() << "\n  From: " << *I << "    To: ";
257            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
258            else dbgs() << "End";
259            dbgs() << " Remaining: " << RemainingCount << "\n");
260
261      // Schedule a region: possibly reorder instructions.
262      // This invalidates 'RegionEnd' and 'I'.
263      Scheduler->schedule();
264
265      // Close the current region.
266      Scheduler->exitRegion();
267
268      // Scheduling has invalidated the current iterator 'I'. Ask the
269      // scheduler for the top of its scheduled region.
270      RegionEnd = Scheduler->begin();
271    }
272    assert(RemainingCount == 0 && "Instruction count mismatch!");
273    Scheduler->finishBlock();
274  }
275  Scheduler->finalizeSchedule();
276  DEBUG(LIS->print(dbgs()));
277  return true;
278}
279
280void MachineScheduler::print(raw_ostream &O, const Module* m) const {
281  // unimplemented
282}
283
284//===----------------------------------------------------------------------===//
285// MachineSchedStrategy - Interface to a machine scheduling algorithm.
286//===----------------------------------------------------------------------===//
287
288namespace {
289class ScheduleDAGMI;
290
291/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
292/// scheduling algorithm.
293///
294/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
295/// in ScheduleDAGInstrs.h
296class MachineSchedStrategy {
297public:
298  virtual ~MachineSchedStrategy() {}
299
300  /// Initialize the strategy after building the DAG for a new region.
301  virtual void initialize(ScheduleDAGMI *DAG) = 0;
302
303  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
304  /// schedule the node at the top of the unscheduled region. Otherwise it will
305  /// be scheduled at the bottom.
306  virtual SUnit *pickNode(bool &IsTopNode) = 0;
307
308  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
309  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
310
311  /// When all predecessor dependencies have been resolved, free this node for
312  /// top-down scheduling.
313  virtual void releaseTopNode(SUnit *SU) = 0;
314  /// When all successor dependencies have been resolved, free this node for
315  /// bottom-up scheduling.
316  virtual void releaseBottomNode(SUnit *SU) = 0;
317};
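
// The InstructionShuffler at the end of this file is a complete (debug-only)
// implementation of this interface. Purely as a sketch of the contract, and
// assuming a hypothetical class name that is not part of LLVM, a strategy that
// always schedules top-down in ready order could look like:
//
//   class TrivialTopDownStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> ReadyQ;
//   public:
//     virtual void initialize(ScheduleDAGMI *) { ReadyQ.clear(); }
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       IsTopNode = true;
//       if (ReadyQ.empty()) return NULL;
//       SUnit *SU = ReadyQ.back();
//       ReadyQ.pop_back();
//       return SU;
//     }
//     virtual void schedNode(SUnit*, bool) {}
//     virtual void releaseTopNode(SUnit *SU) {
//       if (!SU->isScheduled) ReadyQ.push_back(SU);
//     }
//     virtual void releaseBottomNode(SUnit*) {}
//   };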
318} // namespace
319
320//===----------------------------------------------------------------------===//
321// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
322// preservation.
323//===----------------------------------------------------------------------===//
324
325namespace {
326/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
327/// machine instructions while updating LiveIntervals.
328class ScheduleDAGMI : public ScheduleDAGInstrs {
329  AliasAnalysis *AA;
330  RegisterClassInfo *RegClassInfo;
331  MachineSchedStrategy *SchedImpl;
332
333  MachineBasicBlock::iterator LiveRegionEnd;
334
335  /// Register pressure in this region computed by buildSchedGraph.
336  IntervalPressure RegPressure;
337  RegPressureTracker RPTracker;
338
339  /// List of pressure sets that exceed the target's pressure limit before
340  /// scheduling, listed in increasing set ID order. Each pressure set is paired
341  /// with its max pressure in the currently scheduled regions.
342  std::vector<PressureElement> RegionCriticalPSets;
343
344  /// The top of the unscheduled zone.
345  MachineBasicBlock::iterator CurrentTop;
346  IntervalPressure TopPressure;
347  RegPressureTracker TopRPTracker;
348
349  /// The bottom of the unscheduled zone.
350  MachineBasicBlock::iterator CurrentBottom;
351  IntervalPressure BotPressure;
352  RegPressureTracker BotRPTracker;
353
354#ifndef NDEBUG
355  /// The number of instructions scheduled so far. Used to cut off the
356  /// scheduler at the point determined by misched-cutoff.
357  unsigned NumInstrsScheduled;
358#endif
359public:
360  ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
361    ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
362    AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
363    RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
364    CurrentBottom(), BotRPTracker(BotPressure) {
365#ifndef NDEBUG
366    NumInstrsScheduled = 0;
367#endif
368  }
369
370  ~ScheduleDAGMI() {
371    delete SchedImpl;
372  }
373
374  MachineBasicBlock::iterator top() const { return CurrentTop; }
375  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
376
377  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
378  /// region. This covers all instructions in a block, while schedule() may only
379  /// cover a subset.
380  void enterRegion(MachineBasicBlock *bb,
381                   MachineBasicBlock::iterator begin,
382                   MachineBasicBlock::iterator end,
383                   unsigned endcount);
384
385  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
386  /// reorderable instructions.
387  void schedule();
388
389  /// Get current register pressure for the top scheduled instructions.
390  const IntervalPressure &getTopPressure() const { return TopPressure; }
391  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
392
393  /// Get current register pressure for the bottom scheduled instructions.
394  const IntervalPressure &getBotPressure() const { return BotPressure; }
395  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
396
397  /// Get register pressure for the entire scheduling region before scheduling.
398  const IntervalPressure &getRegPressure() const { return RegPressure; }
399
400  const std::vector<PressureElement> &getRegionCriticalPSets() const {
401    return RegionCriticalPSets;
402  }
403
404  /// getIssueWidth - Return the max instructions per scheduling group.
405  unsigned getIssueWidth() const {
406    return (InstrItins && InstrItins->SchedModel)
407      ? InstrItins->SchedModel->IssueWidth : 1;
408  }
409
410  /// getNumMicroOps - Return the number of issue slots required for this MI.
411  unsigned getNumMicroOps(MachineInstr *MI) const {
412    if (!InstrItins) return 1;
413    int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass());
414    return (UOps >= 0) ? UOps : TII->getNumMicroOps(InstrItins, MI);
415  }
416
417protected:
418  void initRegPressure();
419  void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);
420
421  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
422  bool checkSchedLimit();
423
424  void releaseRoots();
425
426  void releaseSucc(SUnit *SU, SDep *SuccEdge);
427  void releaseSuccessors(SUnit *SU);
428  void releasePred(SUnit *SU, SDep *PredEdge);
429  void releasePredecessors(SUnit *SU);
430
431  void placeDebugValues();
432};
433} // namespace
434
435/// releaseSucc - Decrement the NumPredsLeft count of a successor. When
436/// NumPredsLeft reaches zero, release the successor node.
437///
438/// FIXME: Adjust SuccSU height based on MinLatency.
439void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
440  SUnit *SuccSU = SuccEdge->getSUnit();
441
442#ifndef NDEBUG
443  if (SuccSU->NumPredsLeft == 0) {
444    dbgs() << "*** Scheduling failed! ***\n";
445    SuccSU->dump(this);
446    dbgs() << " has been released too many times!\n";
447    llvm_unreachable(0);
448  }
449#endif
450  --SuccSU->NumPredsLeft;
451  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
452    SchedImpl->releaseTopNode(SuccSU);
453}
454
455/// releaseSuccessors - Call releaseSucc on each of SU's successors.
456void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
457  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
458       I != E; ++I) {
459    releaseSucc(SU, &*I);
460  }
461}
462
463/// releasePred - Decrement the NumSuccsLeft count of a predecessor. When
464/// NumSuccsLeft reaches zero, release the predecessor node.
465///
466/// FIXME: Adjust PredSU height based on MinLatency.
467void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
468  SUnit *PredSU = PredEdge->getSUnit();
469
470#ifndef NDEBUG
471  if (PredSU->NumSuccsLeft == 0) {
472    dbgs() << "*** Scheduling failed! ***\n";
473    PredSU->dump(this);
474    dbgs() << " has been released too many times!\n";
475    llvm_unreachable(0);
476  }
477#endif
478  --PredSU->NumSuccsLeft;
479  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
480    SchedImpl->releaseBottomNode(PredSU);
481}
482
483/// releasePredecessors - Call releasePred on each of SU's predecessors.
484void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
485  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
486       I != E; ++I) {
487    releasePred(SU, &*I);
488  }
489}
490
491void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
492                                    MachineBasicBlock::iterator InsertPos) {
493  // Advance RegionBegin if the first instruction moves down.
494  if (&*RegionBegin == MI)
495    ++RegionBegin;
496
497  // Update the instruction stream.
498  BB->splice(InsertPos, BB, MI);
499
500  // Update LiveIntervals
501  LIS->handleMove(MI);
502
503  // Recede RegionBegin if an instruction moves above the first.
504  if (RegionBegin == InsertPos)
505    RegionBegin = MI;
506}
507
508bool ScheduleDAGMI::checkSchedLimit() {
509#ifndef NDEBUG
510  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
511    CurrentTop = CurrentBottom;
512    return false;
513  }
514  ++NumInstrsScheduled;
515#endif
516  return true;
517}
518
519/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
520/// crossing a scheduling boundary. [begin, end) includes all instructions in
521/// the region, including the boundary itself and single-instruction regions
522/// that don't get scheduled.
523void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
524                                MachineBasicBlock::iterator begin,
525                                MachineBasicBlock::iterator end,
526                                unsigned endcount)
527{
528  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
529
530  // For convenience remember the end of the liveness region.
531  LiveRegionEnd =
532    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
533}
534
535// Set up the register pressure trackers for the top and bottom scheduled
536// regions.
537void ScheduleDAGMI::initRegPressure() {
538  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
539  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
540
541  // Close the RPTracker to finalize live ins.
542  RPTracker.closeRegion();
543
544  DEBUG(RPTracker.getPressure().dump(TRI));
545
546  // Initialize the live ins and live outs.
547  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
548  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
549
550  // Close one end of the tracker so we can call
551  // getMaxUpward/DownwardPressureDelta before advancing across any
552  // instructions. This converts currently live regs into live ins/outs.
553  TopRPTracker.closeTop();
554  BotRPTracker.closeBottom();
555
556  // Account for liveness generated by the region boundary.
557  if (LiveRegionEnd != RegionEnd)
558    BotRPTracker.recede();
559
560  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
561
562  // Cache the list of excess pressure sets in this region. This will also track
563  // the max pressure in the scheduled code for these sets.
564  RegionCriticalPSets.clear();
565  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
566  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
567    unsigned Limit = TRI->getRegPressureSetLimit(i);
568    if (RegionPressure[i] > Limit)
569      RegionCriticalPSets.push_back(PressureElement(i, 0));
570  }
571  DEBUG(dbgs() << "Excess PSets: ";
572        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
573          dbgs() << TRI->getRegPressureSetName(
574            RegionCriticalPSets[i].PSetID) << " ";
575        dbgs() << "\n");
576}
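
// Worked example (with illustrative numbers, not from any particular target):
// if a pressure set has a limit of 14 and the region's max pressure before
// scheduling is 17, the set is recorded in RegionCriticalPSets with an initial
// max of 0; updateScheduledPressure() then raises that entry to the largest
// pressure observed for the set as instructions are scheduled.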
577
578// FIXME: When the pressure tracker deals in pressure differences then we won't
579// iterate over all RegionCriticalPSets[i].
580void ScheduleDAGMI::
581updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
582  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
583    unsigned ID = RegionCriticalPSets[i].PSetID;
584    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
585    if ((int)NewMaxPressure[ID] > MaxUnits)
586      MaxUnits = NewMaxPressure[ID];
587  }
588}
589
590// Release all DAG roots for scheduling.
591void ScheduleDAGMI::releaseRoots() {
592  SmallVector<SUnit*, 16> BotRoots;
593
594  for (std::vector<SUnit>::iterator
595         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
596    // A SUnit is ready to top schedule if it has no predecessors.
597    if (I->Preds.empty())
598      SchedImpl->releaseTopNode(&(*I));
599    // A SUnit is ready to bottom schedule if it has no successors.
600    if (I->Succs.empty())
601      BotRoots.push_back(&(*I));
602  }
603  // Release bottom roots in reverse order so the higher priority nodes appear
604  // first. This is more natural and slightly more efficient.
605  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
606         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
607    SchedImpl->releaseBottomNode(*I);
608}
609
610/// schedule - Called back from MachineScheduler::runOnMachineFunction
611/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
612/// only includes instructions that have DAG nodes, not scheduling boundaries.
613void ScheduleDAGMI::schedule() {
614  // Initialize the register pressure tracker used by buildSchedGraph.
615  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
616
617  // Account for liveness generated by the region boundary.
618  if (LiveRegionEnd != RegionEnd)
619    RPTracker.recede();
620
621  // Build the DAG, and compute current register pressure.
622  buildSchedGraph(AA, &RPTracker);
623
624  // Initialize top/bottom trackers after computing region pressure.
625  initRegPressure();
626
627  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
628          SUnits[su].dumpAll(this));
629
630  if (ViewMISchedDAGs) viewGraph();
631
632  SchedImpl->initialize(this);
633
634  // Release edges from the special Entry node or to the special Exit node.
635  releaseSuccessors(&EntrySU);
636  releasePredecessors(&ExitSU);
637
638  // Release all DAG roots for scheduling.
639  releaseRoots();
640
641  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
642  CurrentBottom = RegionEnd;
643  bool IsTopNode = false;
644  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
645    if (!checkSchedLimit())
646      break;
647
648    // Move the instruction to its new location in the instruction stream.
649    MachineInstr *MI = SU->getInstr();
650
651    if (IsTopNode) {
652      assert(SU->isTopReady() && "node still has unscheduled dependencies");
653      if (&*CurrentTop == MI)
654        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
655      else {
656        moveInstruction(MI, CurrentTop);
657        TopRPTracker.setPos(MI);
658      }
659
660      // Update top scheduled pressure.
661      TopRPTracker.advance();
662      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
663      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
664
665      // Release dependent instructions for scheduling.
666      releaseSuccessors(SU);
667    }
668    else {
669      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
670      MachineBasicBlock::iterator priorII =
671        priorNonDebug(CurrentBottom, CurrentTop);
672      if (&*priorII == MI)
673        CurrentBottom = priorII;
674      else {
675        if (&*CurrentTop == MI) {
676          CurrentTop = nextIfDebug(++CurrentTop, priorII);
677          TopRPTracker.setPos(CurrentTop);
678        }
679        moveInstruction(MI, CurrentBottom);
680        CurrentBottom = MI;
681      }
682      // Update bottom scheduled pressure.
683      BotRPTracker.recede();
684      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
685      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
686
687      // Release dependent instructions for scheduling.
688      releasePredecessors(SU);
689    }
690    SU->isScheduled = true;
691    SchedImpl->schedNode(SU, IsTopNode);
692  }
693  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
694
695  placeDebugValues();
696}
697
698/// Reinsert any remaining debug_values, just like the PostRA scheduler.
699void ScheduleDAGMI::placeDebugValues() {
700  // If the first instruction was a DBG_VALUE then put it back.
701  if (FirstDbgValue) {
702    BB->splice(RegionBegin, BB, FirstDbgValue);
703    RegionBegin = FirstDbgValue;
704  }
705
706  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
707         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
708    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
709    MachineInstr *DbgValue = P.first;
710    MachineBasicBlock::iterator OrigPrevMI = P.second;
711    BB->splice(++OrigPrevMI, BB, DbgValue);
712    if (OrigPrevMI == llvm::prior(RegionEnd))
713      RegionEnd = DbgValue;
714  }
715  DbgValues.clear();
716  FirstDbgValue = NULL;
717}
718
719//===----------------------------------------------------------------------===//
720// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
721//===----------------------------------------------------------------------===//
722
723namespace {
724/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
725/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
726/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
727class ReadyQueue {
728  unsigned ID;
729  std::string Name;
730  std::vector<SUnit*> Queue;
731
732public:
733  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
734
735  unsigned getID() const { return ID; }
736
737  StringRef getName() const { return Name; }
738
739  // SU is in this queue if its NodeQueueId is a superset of this ID.
740  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
741
742  bool empty() const { return Queue.empty(); }
743
744  unsigned size() const { return Queue.size(); }
745
746  typedef std::vector<SUnit*>::iterator iterator;
747
748  iterator begin() { return Queue.begin(); }
749
750  iterator end() { return Queue.end(); }
751
752  iterator find(SUnit *SU) {
753    return std::find(Queue.begin(), Queue.end(), SU);
754  }
755
756  void push(SUnit *SU) {
757    Queue.push_back(SU);
758    SU->NodeQueueId |= ID;
759  }
760
761  void remove(iterator I) {
762    (*I)->NodeQueueId &= ~ID;
763    *I = Queue.back();
764    Queue.pop_back();
765  }
766
767  void dump() {
768    dbgs() << Name << ": ";
769    for (unsigned i = 0, e = Queue.size(); i < e; ++i)
770      dbgs() << Queue[i]->NodeNum << " ";
771    dbgs() << "\n";
772  }
773};
774
775/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
776/// the schedule.
777class ConvergingScheduler : public MachineSchedStrategy {
778
779  /// Store the state used by ConvergingScheduler heuristics, required for the
780  /// lifetime of one invocation of pickNode().
781  struct SchedCandidate {
782    // The best SUnit candidate.
783    SUnit *SU;
784
785    // Register pressure values for the best candidate.
786    RegPressureDelta RPDelta;
787
788    SchedCandidate(): SU(NULL) {}
789  };
790  /// Represent the type of SchedCandidate found within a single queue.
791  enum CandResult {
792    NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };
793
794  /// Each scheduling boundary is associated with ready queues. It tracks the
795  /// current cycle in whichever direction it has moved, and maintains the state
796  /// of "hazards" and other interlocks at the current cycle.
797  struct SchedBoundary {
798    ScheduleDAGMI *DAG;
799
800    ReadyQueue Available;
801    ReadyQueue Pending;
802    bool CheckPending;
803
804    ScheduleHazardRecognizer *HazardRec;
805
806    unsigned CurrCycle;
807    unsigned IssueCount;
808
809    /// MinReadyCycle - Cycle of the soonest available instruction.
810    unsigned MinReadyCycle;
811
812    // Remember the greatest min operand latency.
813    unsigned MaxMinLatency;
814
815    /// Pending queues extend the ready queues with the same ID and the
816    /// PendingFlag set.
817    SchedBoundary(unsigned ID, const Twine &Name):
818      DAG(0), Available(ID, Name+".A"),
819      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
820      CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
821      MinReadyCycle(UINT_MAX), MaxMinLatency(0) {}
822
823    ~SchedBoundary() { delete HazardRec; }
824
825    bool isTop() const {
826      return Available.getID() == ConvergingScheduler::TopQID;
827    }
828
829    bool checkHazard(SUnit *SU);
830
831    void releaseNode(SUnit *SU, unsigned ReadyCycle);
832
833    void bumpCycle();
834
835    void bumpNode(SUnit *SU);
836
837    void releasePending();
838
839    void removeReady(SUnit *SU);
840
841    SUnit *pickOnlyChoice();
842  };
843
844  ScheduleDAGMI *DAG;
845  const TargetRegisterInfo *TRI;
846
847  // State of the top and bottom scheduled instruction boundaries.
848  SchedBoundary Top;
849  SchedBoundary Bot;
850
851public:
852  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
853  enum {
854    TopQID = 1,
855    BotQID = 2,
856    LogMaxQID = 2
857  };
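
  // Worked example of the queue IDs: Top.Available has ID TopQID = 1 and
  // Bot.Available has ID BotQID = 2, while each Pending queue uses its
  // boundary's ID shifted by LogMaxQID, giving Top.Pending ID 1 << 2 = 4 and
  // Bot.Pending ID 2 << 2 = 8. A NodeQueueId of 3 therefore means the node is
  // currently in both available queues.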
858
859  ConvergingScheduler():
860    DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
861
862  virtual void initialize(ScheduleDAGMI *dag);
863
864  virtual SUnit *pickNode(bool &IsTopNode);
865
866  virtual void schedNode(SUnit *SU, bool IsTopNode);
867
868  virtual void releaseTopNode(SUnit *SU);
869
870  virtual void releaseBottomNode(SUnit *SU);
871
872protected:
873  SUnit *pickNodeBidirectional(bool &IsTopNode);
874
875  CandResult pickNodeFromQueue(ReadyQueue &Q,
876                               const RegPressureTracker &RPTracker,
877                               SchedCandidate &Candidate);
878#ifndef NDEBUG
879  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
880                      PressureElement P = PressureElement());
881#endif
882};
883} // namespace
884
885void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
886  DAG = dag;
887  TRI = DAG->TRI;
888  Top.DAG = dag;
889  Bot.DAG = dag;
890
891  // Initialize the HazardRecognizers.
892  const TargetMachine &TM = DAG->MF.getTarget();
893  const InstrItineraryData *Itin = TM.getInstrItineraryData();
894  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
895  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
896
897  assert((!ForceTopDown || !ForceBottomUp) &&
898         "-misched-topdown incompatible with -misched-bottomup");
899}
900
901void ConvergingScheduler::releaseTopNode(SUnit *SU) {
902  if (SU->isScheduled)
903    return;
904
905  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
906       I != E; ++I) {
907    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
908    unsigned MinLatency = I->getMinLatency();
909#ifndef NDEBUG
910    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
911#endif
912    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
913      SU->TopReadyCycle = PredReadyCycle + MinLatency;
914  }
915  Top.releaseNode(SU, SU->TopReadyCycle);
916}
917
918void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
919  if (SU->isScheduled)
920    return;
921
922  assert(SU->getInstr() && "Scheduled SUnit must have instr");
923
924  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
925       I != E; ++I) {
926    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
927    unsigned MinLatency = I->getMinLatency();
928#ifndef NDEBUG
929    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
930#endif
931    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
932      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
933  }
934  Bot.releaseNode(SU, SU->BotReadyCycle);
935}
936
937/// Does this SU have a hazard within the current instruction group?
938///
939/// The scheduler supports two modes of hazard recognition. The first is the
940/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
941/// supports highly complicated in-order reservation tables
942/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
943///
944/// The second is a streamlined mechanism that checks for hazards based on
945/// simple counters that the scheduler itself maintains. It explicitly checks
946/// for instruction dispatch limitations, including the number of micro-ops that
947/// can dispatch per cycle.
948///
949/// TODO: Also check whether the SU must start a new group.
950bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
951  if (HazardRec->isEnabled())
952    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
953
954  if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth())
955    return true;
956
957  return false;
958}
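
// Worked example of the counter-based check above (illustrative numbers): with
// an issue width of 2 and IssueCount already at 1, an instruction that needs 2
// micro-ops is a hazard because 1 + 2 > 2, so releaseNode() keeps it in the
// Pending queue until bumpCycle() starts a new instruction group.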
959
960void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
961                                                     unsigned ReadyCycle) {
962  if (ReadyCycle < MinReadyCycle)
963    MinReadyCycle = ReadyCycle;
964
965  // Check for interlocks first. For the purpose of other heuristics, an
966  // instruction that cannot issue appears as if it's not in the ReadyQueue.
967  if (ReadyCycle > CurrCycle || checkHazard(SU))
968    Pending.push(SU);
969  else
970    Available.push(SU);
971}
972
973/// Move the boundary of scheduled code by one cycle.
974void ConvergingScheduler::SchedBoundary::bumpCycle() {
975  unsigned Width = DAG->getIssueWidth();
976  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
977
978  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
979  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
980
981  if (!HazardRec->isEnabled()) {
982    // Bypass HazardRec virtual calls.
983    CurrCycle = NextCycle;
984  }
985  else {
986    // Bypass getHazardType calls in case of long latency.
987    for (; CurrCycle != NextCycle; ++CurrCycle) {
988      if (isTop())
989        HazardRec->AdvanceCycle();
990      else
991        HazardRec->RecedeCycle();
992    }
993  }
994  CheckPending = true;
995
996  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
997        << CurrCycle << '\n');
998}
999
1000/// Move the boundary of scheduled code by one SUnit.
1001void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
1002  // Update the reservation table.
1003  if (HazardRec->isEnabled()) {
1004    if (!isTop() && SU->isCall) {
1005      // Calls are scheduled with their preceding instructions. For bottom-up
1006      // scheduling, clear the pipeline state before emitting.
1007      HazardRec->Reset();
1008    }
1009    HazardRec->EmitInstruction(SU);
1010  }
1011  // Check the instruction group dispatch limit.
1012  // TODO: Check if this SU must end a dispatch group.
1013  IssueCount += DAG->getNumMicroOps(SU->getInstr());
1014  if (IssueCount >= DAG->getIssueWidth()) {
1015    DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
1016    bumpCycle();
1017  }
1018}
1019
1020/// Release pending ready nodes into the available queue. This makes them
1021/// visible to heuristics.
1022void ConvergingScheduler::SchedBoundary::releasePending() {
1023  // If the available queue is empty, it is safe to reset MinReadyCycle.
1024  if (Available.empty())
1025    MinReadyCycle = UINT_MAX;
1026
1027  // Check to see if any of the pending instructions are ready to issue.  If
1028  // so, add them to the available queue.
1029  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
1030    SUnit *SU = *(Pending.begin()+i);
1031    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
1032
1033    if (ReadyCycle < MinReadyCycle)
1034      MinReadyCycle = ReadyCycle;
1035
1036    if (ReadyCycle > CurrCycle)
1037      continue;
1038
1039    if (checkHazard(SU))
1040      continue;
1041
1042    Available.push(SU);
1043    Pending.remove(Pending.begin()+i);
1044    --i; --e;
1045  }
1046  CheckPending = false;
1047}
1048
1049/// Remove SU from the ready set for this boundary.
1050void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
1051  if (Available.isInQueue(SU))
1052    Available.remove(Available.find(SU));
1053  else {
1054    assert(Pending.isInQueue(SU) && "bad ready count");
1055    Pending.remove(Pending.find(SU));
1056  }
1057}
1058
1059/// If this queue only has one ready candidate, return it. As a side effect,
1060/// advance the cycle until at least one node is ready. If multiple instructions
1061/// are ready, return NULL.
1062SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
1063  if (CheckPending)
1064    releasePending();
1065
1066  for (unsigned i = 0; Available.empty(); ++i) {
1067    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
1068           "permanent hazard"); (void)i;
1069    bumpCycle();
1070    releasePending();
1071  }
1072  if (Available.size() == 1)
1073    return *Available.begin();
1074  return NULL;
1075}
1076
1077#ifndef NDEBUG
1078void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
1079                                         SUnit *SU, PressureElement P) {
1080  dbgs() << Label << " " << Q.getName() << " ";
1081  if (P.isValid())
1082    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
1083           << " ";
1084  else
1085    dbgs() << "     ";
1086  SU->dump(DAG);
1087}
1088#endif
1089
1090/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
1091/// more desirable than RHS from a scheduling standpoint.
1092static bool compareRPDelta(const RegPressureDelta &LHS,
1093                           const RegPressureDelta &RHS) {
1094  // Compare each component of pressure in decreasing order of importance
1095  // without checking if any are valid. Invalid PressureElements are assumed to
1096  // have UnitIncrease==0, so are neutral.
1097
1098  // Avoid exceeding the target's limit.
1099  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
1100    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
1101
1102  // Avoid increasing the max critical pressure in the scheduled region.
1103  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
1104    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
1105
1106  // Avoid increasing the max pressure of the entire region.
1107  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
1108    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
1109
1110  return false;
1111}
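
// Worked example (illustrative values): if the top candidate's delta has
// Excess.UnitIncrease == 0 while the bottom candidate's has 2, compareRPDelta()
// returns true for (Top, Bot) without consulting the CriticalMax or CurrentMax
// components, so pickNodeBidirectional() below schedules the top candidate.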
1112
1113/// Pick the best candidate from the top queue.
1114///
1115/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
1116/// DAG building. To adjust for the current scheduling location we need to
1117/// maintain the number of vreg uses remaining to be top-scheduled.
1118ConvergingScheduler::CandResult ConvergingScheduler::
1119pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
1120                  SchedCandidate &Candidate) {
1121  DEBUG(Q.dump());
1122
1123  // getMaxPressureDelta temporarily modifies the tracker.
1124  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
1125
1126  // Candidate.SU remains NULL if no candidate in this queue beats the existing one.
1127  CandResult FoundCandidate = NoCand;
1128  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
1129    RegPressureDelta RPDelta;
1130    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
1131                                    DAG->getRegionCriticalPSets(),
1132                                    DAG->getRegPressure().MaxSetPressure);
1133
1134    // Initialize the candidate if needed.
1135    if (!Candidate.SU) {
1136      Candidate.SU = *I;
1137      Candidate.RPDelta = RPDelta;
1138      FoundCandidate = NodeOrder;
1139      continue;
1140    }
1141    // Avoid exceeding the target's limit.
1142    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
1143      DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
1144      Candidate.SU = *I;
1145      Candidate.RPDelta = RPDelta;
1146      FoundCandidate = SingleExcess;
1147      continue;
1148    }
1149    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
1150      continue;
1151    if (FoundCandidate == SingleExcess)
1152      FoundCandidate = MultiPressure;
1153
1154    // Avoid increasing the max critical pressure in the scheduled region.
1155    if (RPDelta.CriticalMax.UnitIncrease
1156        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
1157      DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
1158      Candidate.SU = *I;
1159      Candidate.RPDelta = RPDelta;
1160      FoundCandidate = SingleCritical;
1161      continue;
1162    }
1163    if (RPDelta.CriticalMax.UnitIncrease
1164        > Candidate.RPDelta.CriticalMax.UnitIncrease)
1165      continue;
1166    if (FoundCandidate == SingleCritical)
1167      FoundCandidate = MultiPressure;
1168
1169    // Avoid increasing the max pressure of the entire region.
1170    if (RPDelta.CurrentMax.UnitIncrease
1171        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
1172      DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
1173      Candidate.SU = *I;
1174      Candidate.RPDelta = RPDelta;
1175      FoundCandidate = SingleMax;
1176      continue;
1177    }
1178    if (RPDelta.CurrentMax.UnitIncrease
1179        > Candidate.RPDelta.CurrentMax.UnitIncrease)
1180      continue;
1181    if (FoundCandidate == SingleMax)
1182      FoundCandidate = MultiPressure;
1183
1184    // Fall through to original instruction order.
1185    // Only consider node order if Candidate was chosen from this Q.
1186    if (FoundCandidate == NoCand)
1187      continue;
1188
1189    if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
1190        || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
1191      DEBUG(traceCandidate("NCAND", Q, *I));
1192      Candidate.SU = *I;
1193      Candidate.RPDelta = RPDelta;
1194      FoundCandidate = NodeOrder;
1195    }
1196  }
1197  return FoundCandidate;
1198}
1199
1200/// Pick the best candidate node from either the top or bottom queue.
1201SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
1202  // Schedule as far as possible in the direction of no choice. This is most
1203  // efficient, but also provides the best heuristics for CriticalPSets.
1204  if (SUnit *SU = Bot.pickOnlyChoice()) {
1205    IsTopNode = false;
1206    return SU;
1207  }
1208  if (SUnit *SU = Top.pickOnlyChoice()) {
1209    IsTopNode = true;
1210    return SU;
1211  }
1212  SchedCandidate BotCand;
1213  // Prefer bottom scheduling when heuristics are silent.
1214  CandResult BotResult = pickNodeFromQueue(Bot.Available,
1215                                           DAG->getBotRPTracker(), BotCand);
1216  assert(BotResult != NoCand && "failed to find the first candidate");
1217
1218  // If either Q has a single candidate that provides the least increase in
1219  // Excess pressure, we can immediately schedule from that Q.
1220  //
1221  // RegionCriticalPSets summarizes the pressure within the scheduled region and
1222  // affects picking from either Q. If scheduling in one direction must
1223  // increase pressure for one of the excess PSets, then schedule in that
1224  // direction first to provide more freedom in the other direction.
1225  if (BotResult == SingleExcess || BotResult == SingleCritical) {
1226    IsTopNode = false;
1227    return BotCand.SU;
1228  }
1229  // Check if the top Q has a better candidate.
1230  SchedCandidate TopCand;
1231  CandResult TopResult = pickNodeFromQueue(Top.Available,
1232                                           DAG->getTopRPTracker(), TopCand);
1233  assert(TopResult != NoCand && "failed to find the first candidate");
1234
1235  if (TopResult == SingleExcess || TopResult == SingleCritical) {
1236    IsTopNode = true;
1237    return TopCand.SU;
1238  }
1239  // If either Q has a single candidate that minimizes pressure above the
1240  // original region's pressure, pick it.
1241  if (BotResult == SingleMax) {
1242    IsTopNode = false;
1243    return BotCand.SU;
1244  }
1245  if (TopResult == SingleMax) {
1246    IsTopNode = true;
1247    return TopCand.SU;
1248  }
1249  // Check for a salient pressure difference and pick the best from either side.
1250  if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
1251    IsTopNode = true;
1252    return TopCand.SU;
1253  }
1254  // Otherwise prefer the bottom candidate in node order.
1255  IsTopNode = false;
1256  return BotCand.SU;
1257}
1258
1259/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
1260SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
1261  if (DAG->top() == DAG->bottom()) {
1262    assert(Top.Available.empty() && Top.Pending.empty() &&
1263           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
1264    return NULL;
1265  }
1266  SUnit *SU;
1267  if (ForceTopDown) {
1268    SU = Top.pickOnlyChoice();
1269    if (!SU) {
1270      SchedCandidate TopCand;
1271      CandResult TopResult =
1272        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
1273      assert(TopResult != NoCand && "failed to find the first candidate");
1274      (void)TopResult;
1275      SU = TopCand.SU;
1276    }
1277    IsTopNode = true;
1278  }
1279  else if (ForceBottomUp) {
1280    SU = Bot.pickOnlyChoice();
1281    if (!SU) {
1282      SchedCandidate BotCand;
1283      CandResult BotResult =
1284        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
1285      assert(BotResult != NoCand && "failed to find the first candidate");
1286      (void)BotResult;
1287      SU = BotCand.SU;
1288    }
1289    IsTopNode = false;
1290  }
1291  else {
1292    SU = pickNodeBidirectional(IsTopNode);
1293  }
1294  if (SU->isTopReady())
1295    Top.removeReady(SU);
1296  if (SU->isBottomReady())
1297    Bot.removeReady(SU);
1298
1299  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
1300        << " Scheduling Instruction in cycle "
1301        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
1302        SU->dump(DAG));
1303  return SU;
1304}
1305
1306/// Update the scheduler's state after scheduling a node. This is the same node
1307/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
1308/// its state based on the current cycle before MachineSchedStrategy does.
1309void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
1310  if (IsTopNode) {
1311    SU->TopReadyCycle = Top.CurrCycle;
1312    Top.bumpNode(SU);
1313  }
1314  else {
1315    SU->BotReadyCycle = Bot.CurrCycle;
1316    Bot.bumpNode(SU);
1317  }
1318}
1319
1320/// Create the standard converging machine scheduler. This will be used as the
1321/// default scheduler if the target does not set a default.
1322static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
1323  assert((!ForceTopDown || !ForceBottomUp) &&
1324         "-misched-topdown incompatible with -misched-bottomup");
1325  return new ScheduleDAGMI(C, new ConvergingScheduler());
1326}
1327static MachineSchedRegistry
1328ConvergingSchedRegistry("converge", "Standard converging scheduler.",
1329                        createConvergingSched);
1330
1331//===----------------------------------------------------------------------===//
1332// Machine Instruction Shuffler for Correctness Testing
1333//===----------------------------------------------------------------------===//
1334
1335#ifndef NDEBUG
1336namespace {
1337/// Apply a less-than relation on the node order, which corresponds to the
1338/// instruction order prior to scheduling. IsReverse implements greater-than.
1339template<bool IsReverse>
1340struct SUnitOrder {
1341  bool operator()(SUnit *A, SUnit *B) const {
1342    if (IsReverse)
1343      return A->NodeNum > B->NodeNum;
1344    else
1345      return A->NodeNum < B->NodeNum;
1346  }
1347};
1348
1349/// Reorder instructions as much as possible.
1350class InstructionShuffler : public MachineSchedStrategy {
1351  bool IsAlternating;
1352  bool IsTopDown;
1353
1354  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
1355  // gives nodes with a higher number higher priority, causing the latest
1356  // instructions to be scheduled first.
1357  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
1358    TopQ;
1359  // When scheduling bottom-up, use greater-than as the queue priority.
1360  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
1361    BottomQ;
1362public:
1363  InstructionShuffler(bool alternate, bool topdown)
1364    : IsAlternating(alternate), IsTopDown(topdown) {}
1365
1366  virtual void initialize(ScheduleDAGMI *) {
1367    TopQ.clear();
1368    BottomQ.clear();
1369  }
1370
1371  /// Implement MachineSchedStrategy interface.
1372  /// -----------------------------------------
1373
1374  virtual SUnit *pickNode(bool &IsTopNode) {
1375    SUnit *SU;
1376    if (IsTopDown) {
1377      do {
1378        if (TopQ.empty()) return NULL;
1379        SU = TopQ.top();
1380        TopQ.pop();
1381      } while (SU->isScheduled);
1382      IsTopNode = true;
1383    }
1384    else {
1385      do {
1386        if (BottomQ.empty()) return NULL;
1387        SU = BottomQ.top();
1388        BottomQ.pop();
1389      } while (SU->isScheduled);
1390      IsTopNode = false;
1391    }
1392    if (IsAlternating)
1393      IsTopDown = !IsTopDown;
1394    return SU;
1395  }
1396
1397  virtual void schedNode(SUnit *SU, bool IsTopNode) {}
1398
1399  virtual void releaseTopNode(SUnit *SU) {
1400    TopQ.push(SU);
1401  }
1402  virtual void releaseBottomNode(SUnit *SU) {
1403    BottomQ.push(SU);
1404  }
1405};
1406} // namespace
1407
1408static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
1409  bool Alternate = !ForceTopDown && !ForceBottomUp;
1410  bool TopDown = !ForceBottomUp;
1411  assert((TopDown || !ForceTopDown) &&
1412         "-misched-topdown incompatible with -misched-bottomup");
1413  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
1414}
1415static MachineSchedRegistry ShufflerRegistry(
1416  "shuffle", "Shuffle machine instructions alternating directions",
1417  createInstructionShuffler);
1418#endif // !NDEBUG
1419