MachineScheduler.cpp revision f323424d71a8c7c129f4fc7c9c109ca6ce2f4460
1//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// MachineScheduler schedules machine instructions after phi elimination. It
11// preserves LiveIntervals so it can be invoked before register allocation.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "misched"
16
17#include "RegisterClassInfo.h"
18#include "RegisterPressure.h"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "llvm/CodeGen/MachineScheduler.h"
21#include "llvm/CodeGen/Passes.h"
22#include "llvm/CodeGen/ScheduleDAGInstrs.h"
23#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/Target/TargetInstrInfo.h"
26#include "llvm/Support/CommandLine.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/raw_ostream.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/PriorityQueue.h"
32
33#include <queue>
34
35using namespace llvm;
36
37static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
38                                  cl::desc("Force top-down list scheduling"));
39static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
40                                  cl::desc("Force bottom-up list scheduling"));
41
42#ifndef NDEBUG
43static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
44  cl::desc("Pop up a window to show MISched dags after they are processed"));
45
46static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
47  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
48#else
49static bool ViewMISchedDAGs = false;
50#endif // NDEBUG
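// For manual experimentation in a +Asserts build, the debug-only flags above
// can be combined with the scheduler selection flag defined further below,
// for example (assuming the machine scheduler pass is actually enabled in the
// target's pass configuration, which is driver/target dependent):
//
//   llc ... -misched=shuffle -misched-cutoff=20 -view-misched-dags
//
// This stops scheduling after 20 instructions and pops up the DAG viewer for
// each processed region.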
51
52//===----------------------------------------------------------------------===//
53// Machine Instruction Scheduling Pass and Registry
54//===----------------------------------------------------------------------===//
55
56MachineSchedContext::MachineSchedContext():
57    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
58  RegClassInfo = new RegisterClassInfo();
59}
60
61MachineSchedContext::~MachineSchedContext() {
62  delete RegClassInfo;
63}
64
65namespace {
66/// MachineScheduler runs after coalescing and before register allocation.
67class MachineScheduler : public MachineSchedContext,
68                         public MachineFunctionPass {
69public:
70  MachineScheduler();
71
72  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
73
74  virtual void releaseMemory() {}
75
76  virtual bool runOnMachineFunction(MachineFunction&);
77
78  virtual void print(raw_ostream &O, const Module* = 0) const;
79
80  static char ID; // Class identification, replacement for typeinfo
81};
82} // namespace
83
84char MachineScheduler::ID = 0;
85
86char &llvm::MachineSchedulerID = MachineScheduler::ID;
87
88INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
89                      "Machine Instruction Scheduler", false, false)
90INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
91INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
92INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
93INITIALIZE_PASS_END(MachineScheduler, "misched",
94                    "Machine Instruction Scheduler", false, false)
95
96MachineScheduler::MachineScheduler()
97: MachineFunctionPass(ID) {
98  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
99}
100
101void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
102  AU.setPreservesCFG();
103  AU.addRequiredID(MachineDominatorsID);
104  AU.addRequired<MachineLoopInfo>();
105  AU.addRequired<AliasAnalysis>();
106  AU.addRequired<TargetPassConfig>();
107  AU.addRequired<SlotIndexes>();
108  AU.addPreserved<SlotIndexes>();
109  AU.addRequired<LiveIntervals>();
110  AU.addPreserved<LiveIntervals>();
111  MachineFunctionPass::getAnalysisUsage(AU);
112}
113
114MachinePassRegistry MachineSchedRegistry::Registry;
115
116/// A dummy default scheduler factory used to detect whether the scheduler
117/// has been overridden on the command line.
118static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
119  return 0;
120}
121
122/// MachineSchedOpt allows command line selection of the scheduler.
123static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
124               RegisterPassParser<MachineSchedRegistry> >
125MachineSchedOpt("misched",
126                cl::init(&useDefaultMachineSched), cl::Hidden,
127                cl::desc("Machine instruction scheduler to use"));
128
129static MachineSchedRegistry
130DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
131                     useDefaultMachineSched);
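// Targets or plugins can expose additional schedulers through the same
// registry mechanism. A hypothetical registration (the factory and names
// below are illustrative only) follows the same pattern:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C);
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "My experimental scheduler.", createMySched);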
132
133/// Forward declare the standard machine scheduler. This will be used as the
134/// default scheduler if the target does not set a default.
135static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
136
137
138/// Decrement this iterator until reaching the top or a non-debug instr.
139static MachineBasicBlock::iterator
140priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
141  assert(I != Beg && "reached the top of the region, cannot decrement");
142  while (--I != Beg) {
143    if (!I->isDebugValue())
144      break;
145  }
146  return I;
147}
148
149/// If this iterator is a debug value, increment until reaching the End or a
150/// non-debug instruction.
151static MachineBasicBlock::iterator
152nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
153  for(; I != End; ++I) {
154    if (!I->isDebugValue())
155      break;
156  }
157  return I;
158}
159
160/// Top-level MachineScheduler pass driver.
161///
162/// Visit blocks in function order. Divide each block into scheduling regions
163/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
164/// consistent with the DAG builder, which traverses the interior of the
165/// scheduling regions bottom-up.
166///
167/// This design avoids exposing scheduling boundaries to the DAG builder,
168/// simplifying the DAG builder's support for "special" target instructions.
169/// At the same time the design allows target schedulers to operate across
170/// scheduling boundaries, for example to bundle the boundary instructions
171/// without reordering them. This creates complexity, because the target
172/// scheduler must update the RegionBegin and RegionEnd positions cached by
173/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
174/// design would be to split blocks at scheduling boundaries, but LLVM has a
175/// general bias against block splitting purely for implementation simplicity.
176bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
177  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
178
179  // Initialize the context of the pass.
180  MF = &mf;
181  MLI = &getAnalysis<MachineLoopInfo>();
182  MDT = &getAnalysis<MachineDominatorTree>();
183  PassConfig = &getAnalysis<TargetPassConfig>();
184  AA = &getAnalysis<AliasAnalysis>();
185
186  LIS = &getAnalysis<LiveIntervals>();
187  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
188
189  RegClassInfo->runOnMachineFunction(*MF);
190
191  // Select the scheduler, or set the default.
192  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
193  if (Ctor == useDefaultMachineSched) {
194    // Get the default scheduler set by the target.
195    Ctor = MachineSchedRegistry::getDefault();
196    if (!Ctor) {
197      Ctor = createConvergingSched;
198      MachineSchedRegistry::setDefault(Ctor);
199    }
200  }
201  // Instantiate the selected scheduler.
202  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
203
204  // Visit all machine basic blocks.
205  //
206  // TODO: Visit blocks in global postorder or postorder within the bottom-up
207  // loop tree. Then we can optionally compute global RegPressure.
208  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
209       MBB != MBBEnd; ++MBB) {
210
211    Scheduler->startBlock(MBB);
212
213    // Break the block into scheduling regions [I, RegionEnd), and schedule each
214    // region as soon as it is discovered. RegionEnd points to the scheduling
215    // boundary at the bottom of the region. The DAG does not include RegionEnd,
216    // but the region does (i.e. the next RegionEnd is above the previous
217    // RegionBegin). If the current block has no terminator then RegionEnd ==
218    // MBB->end() for the bottom region.
219    //
220    // The Scheduler may insert instructions during either schedule() or
221    // exitRegion(), even for empty regions. So the local iterators 'I' and
222    // 'RegionEnd' are invalid across these calls.
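    //
    // For example, in a block I0 I1 B I2 T, where B is a scheduling boundary
    // and T the terminator, the regions are visited bottom-up: first the
    // region containing only I2, then the region containing I0 and I1. B and
    // T are boundaries, are not part of any DAG, and are not reordered here.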
223    unsigned RemainingCount = MBB->size();
224    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
225        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
226
227      // Avoid decrementing RegionEnd for blocks with no terminator.
228      if (RegionEnd != MBB->end()
229          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
230        --RegionEnd;
231        // Count the boundary instruction.
232        --RemainingCount;
233      }
234
235      // The next region starts above the previous region. Look backward in the
236      // instruction stream until we find the nearest boundary.
237      MachineBasicBlock::iterator I = RegionEnd;
238      for(;I != MBB->begin(); --I, --RemainingCount) {
239        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
240          break;
241      }
242      // Notify the scheduler of the region, even if we may skip scheduling
243      // it. Perhaps it still needs to be bundled.
244      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);
245
246      // Skip empty scheduling regions (0 or 1 schedulable instructions).
247      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
248        // Close the current region. Bundle the terminator if needed.
249        // This invalidates 'RegionEnd' and 'I'.
250        Scheduler->exitRegion();
251        continue;
252      }
253      DEBUG(dbgs() << "MachineScheduling " << MF->getFunction()->getName()
254            << ":BB#" << MBB->getNumber() << "\n  From: " << *I << "    To: ";
255            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
256            else dbgs() << "End";
257            dbgs() << " Remaining: " << RemainingCount << "\n");
258
259      // Schedule a region: possibly reorder instructions.
260      // This invalidates 'RegionEnd' and 'I'.
261      Scheduler->schedule();
262
263      // Close the current region.
264      Scheduler->exitRegion();
265
266      // Scheduling has invalidated the current iterator 'I'. Ask the
267      // scheduler for the top of its scheduled region.
268      RegionEnd = Scheduler->begin();
269    }
270    assert(RemainingCount == 0 && "Instruction count mismatch!");
271    Scheduler->finishBlock();
272  }
273  Scheduler->finalizeSchedule();
274  DEBUG(LIS->print(dbgs()));
275  return true;
276}
277
278void MachineScheduler::print(raw_ostream &O, const Module* m) const {
279  // unimplemented
280}
281
282//===----------------------------------------------------------------------===//
283// MachineSchedStrategy - Interface to a machine scheduling algorithm.
284//===----------------------------------------------------------------------===//
285
286namespace {
287class ScheduleDAGMI;
288
289/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
290/// scheduling algorithm.
291///
292/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
293/// in ScheduleDAGInstrs.h
294class MachineSchedStrategy {
295public:
296  virtual ~MachineSchedStrategy() {}
297
298  /// Initialize the strategy after building the DAG for a new region.
299  virtual void initialize(ScheduleDAGMI *DAG) = 0;
300
301  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
302  /// schedule the node at the top of the unscheduled region. Otherwise it will
303  /// be scheduled at the bottom.
304  virtual SUnit *pickNode(bool &IsTopNode) = 0;
305
306  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
307  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
308
309  /// When all predecessor dependencies have been resolved, free this node for
310  /// top-down scheduling.
311  virtual void releaseTopNode(SUnit *SU) = 0;
312  /// When all successor dependencies have been resolved, free this node for
313  /// bottom-up scheduling.
314  virtual void releaseBottomNode(SUnit *SU) = 0;
315};
316} // namespace
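// The InstructionShuffler strategy near the end of this file is the simplest
// in-tree implementation of this interface. For illustration, an even more
// minimal, top-down-only strategy (hypothetical, not registered anywhere)
// could be sketched as:
//
//   struct TrivialTopDownStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> ReadyTop;
//     virtual void initialize(ScheduleDAGMI *DAG) { ReadyTop.clear(); }
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       if (ReadyTop.empty()) return NULL;
//       IsTopNode = true;
//       SUnit *SU = ReadyTop.back();
//       ReadyTop.pop_back();
//       return SU;
//     }
//     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
//     virtual void releaseTopNode(SUnit *SU) { ReadyTop.push_back(SU); }
//     virtual void releaseBottomNode(SUnit *SU) {}
//   };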
317
318//===----------------------------------------------------------------------===//
319// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
320// preservation.
321//===----------------------------------------------------------------------===//
322
323namespace {
324/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
325/// machine instructions while updating LiveIntervals.
326class ScheduleDAGMI : public ScheduleDAGInstrs {
327  AliasAnalysis *AA;
328  RegisterClassInfo *RegClassInfo;
329  MachineSchedStrategy *SchedImpl;
330
331  MachineBasicBlock::iterator LiveRegionEnd;
332
333  /// Register pressure in this region computed by buildSchedGraph.
334  IntervalPressure RegPressure;
335  RegPressureTracker RPTracker;
336
337  /// List of pressure sets that exceed the target's pressure limit before
338  /// scheduling, listed in increasing set ID order. Each pressure set is paired
339  /// with its max pressure in the currently scheduled regions.
340  std::vector<PressureElement> RegionCriticalPSets;
341
342  /// The top of the unscheduled zone.
343  MachineBasicBlock::iterator CurrentTop;
344  IntervalPressure TopPressure;
345  RegPressureTracker TopRPTracker;
346
347  /// The bottom of the unscheduled zone.
348  MachineBasicBlock::iterator CurrentBottom;
349  IntervalPressure BotPressure;
350  RegPressureTracker BotRPTracker;
351
352  /// The number of instructions scheduled so far. Used to cut off the
353  /// scheduler at the point determined by misched-cutoff.
354  unsigned NumInstrsScheduled;
355public:
356  ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
357    ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
358    AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
359    RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
360    CurrentBottom(), BotRPTracker(BotPressure), NumInstrsScheduled(0) {}
361
362  ~ScheduleDAGMI() {
363    delete SchedImpl;
364  }
365
366  MachineBasicBlock::iterator top() const { return CurrentTop; }
367  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
368
369  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
370  /// region. This covers all instructions in a block, while schedule() may only
371  /// cover a subset.
372  void enterRegion(MachineBasicBlock *bb,
373                   MachineBasicBlock::iterator begin,
374                   MachineBasicBlock::iterator end,
375                   unsigned endcount);
376
377  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
378  /// reorderable instructions.
379  void schedule();
380
381  /// Get current register pressure for the top scheduled instructions.
382  const IntervalPressure &getTopPressure() const { return TopPressure; }
383  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
384
385  /// Get current register pressure for the bottom scheduled instructions.
386  const IntervalPressure &getBotPressure() const { return BotPressure; }
387  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
388
389  /// Get register pressure for the entire scheduling region before scheduling.
390  const IntervalPressure &getRegPressure() const { return RegPressure; }
391
392  const std::vector<PressureElement> &getRegionCriticalPSets() const {
393    return RegionCriticalPSets;
394  }
395
396protected:
397  void initRegPressure();
398  void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);
399
400  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
401  bool checkSchedLimit();
402
403  void releaseRoots();
404
405  void releaseSucc(SUnit *SU, SDep *SuccEdge);
406  void releaseSuccessors(SUnit *SU);
407  void releasePred(SUnit *SU, SDep *PredEdge);
408  void releasePredecessors(SUnit *SU);
409
410  void placeDebugValues();
411};
412} // namespace
413
414/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
415/// NumPredsLeft reaches zero, release the successor node.
416///
417/// FIXME: Adjust SuccSU height based on MinLatency.
418void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
419  SUnit *SuccSU = SuccEdge->getSUnit();
420
421#ifndef NDEBUG
422  if (SuccSU->NumPredsLeft == 0) {
423    dbgs() << "*** Scheduling failed! ***\n";
424    SuccSU->dump(this);
425    dbgs() << " has been released too many times!\n";
426    llvm_unreachable(0);
427  }
428#endif
429  --SuccSU->NumPredsLeft;
430  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
431    SchedImpl->releaseTopNode(SuccSU);
432}
433
434/// releaseSuccessors - Call releaseSucc on each of SU's successors.
435void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
436  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
437       I != E; ++I) {
438    releaseSucc(SU, &*I);
439  }
440}
441
442/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
443/// NumSuccsLeft reaches zero, release the predecessor node.
444///
445/// FIXME: Adjust PredSU height based on MinLatency.
446void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
447  SUnit *PredSU = PredEdge->getSUnit();
448
449#ifndef NDEBUG
450  if (PredSU->NumSuccsLeft == 0) {
451    dbgs() << "*** Scheduling failed! ***\n";
452    PredSU->dump(this);
453    dbgs() << " has been released too many times!\n";
454    llvm_unreachable(0);
455  }
456#endif
457  --PredSU->NumSuccsLeft;
458  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
459    SchedImpl->releaseBottomNode(PredSU);
460}
461
462/// releasePredecessors - Call releasePred on each of SU's predecessors.
463void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
464  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
465       I != E; ++I) {
466    releasePred(SU, &*I);
467  }
468}
469
470void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
471                                    MachineBasicBlock::iterator InsertPos) {
472  // Advance RegionBegin if the first instruction moves down.
473  if (&*RegionBegin == MI)
474    ++RegionBegin;
475
476  // Update the instruction stream.
477  BB->splice(InsertPos, BB, MI);
478
479  // Update LiveIntervals
480  LIS->handleMove(MI);
481
482  // Recede RegionBegin if an instruction moves above the first.
483  if (RegionBegin == InsertPos)
484    RegionBegin = MI;
485}
486
487bool ScheduleDAGMI::checkSchedLimit() {
488#ifndef NDEBUG
489  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
490    CurrentTop = CurrentBottom;
491    return false;
492  }
493  ++NumInstrsScheduled;
494#endif
495  return true;
496}
497
498/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
499/// crossing a scheduling boundary. [begin, end) includes all instructions in
500/// the region, including the boundary itself and single-instruction regions
501/// that don't get scheduled.
502void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
503                                MachineBasicBlock::iterator begin,
504                                MachineBasicBlock::iterator end,
505                                unsigned endcount)
506{
507  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
508
509  // For convenience remember the end of the liveness region.
510  LiveRegionEnd =
511    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
512}
513
514// Set up the register pressure trackers for the top and bottom scheduled
515// regions.
516void ScheduleDAGMI::initRegPressure() {
517  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
518  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
519
520  // Close the RPTracker to finalize live ins.
521  RPTracker.closeRegion();
522
523  // Initialize the live ins and live outs.
524  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
525  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
526
527  // Close one end of the tracker so we can call
528  // getMaxUpward/DownwardPressureDelta before advancing across any
529  // instructions. This converts currently live regs into live ins/outs.
530  TopRPTracker.closeTop();
531  BotRPTracker.closeBottom();
532
533  // Account for liveness generated by the region boundary.
534  if (LiveRegionEnd != RegionEnd)
535    BotRPTracker.recede();
536
537  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
538
539  // Cache the list of excess pressure sets in this region. This will also track
540  // the max pressure in the scheduled code for these sets.
541  RegionCriticalPSets.clear();
542  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
543  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
544    unsigned Limit = TRI->getRegPressureSetLimit(i);
545    if (RegionPressure[i] > Limit)
546      RegionCriticalPSets.push_back(PressureElement(i, 0));
547  }
548  DEBUG(dbgs() << "Excess PSets: ";
549        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
550          dbgs() << TRI->getRegPressureSetName(
551            RegionCriticalPSets[i].PSetID) << " ";
552        dbgs() << "\n");
553}
554
555// FIXME: When the pressure tracker deals in pressure differences then we won't
556// iterate over all RegionCriticalPSets[i].
557void ScheduleDAGMI::
558updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
559  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
560    unsigned ID = RegionCriticalPSets[i].PSetID;
561    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
562    if ((int)NewMaxPressure[ID] > MaxUnits)
563      MaxUnits = NewMaxPressure[ID];
564  }
565}
566
567// Release all DAG roots for scheduling.
568void ScheduleDAGMI::releaseRoots() {
569  SmallVector<SUnit*, 16> BotRoots;
570
571  for (std::vector<SUnit>::iterator
572         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
573    // A SUnit is ready to top schedule if it has no predecessors.
574    if (I->Preds.empty())
575      SchedImpl->releaseTopNode(&(*I));
576    // A SUnit is ready to bottom schedule if it has no successors.
577    if (I->Succs.empty())
578      BotRoots.push_back(&(*I));
579  }
580  // Release bottom roots in reverse order so the higher priority nodes appear
581  // first. This is more natural and slightly more efficient.
582  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
583         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
584    SchedImpl->releaseBottomNode(*I);
585}
586
587/// schedule - Called back from MachineScheduler::runOnMachineFunction
588/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
589/// only includes instructions that have DAG nodes, not scheduling boundaries.
590void ScheduleDAGMI::schedule() {
591  // Initialize the register pressure tracker used by buildSchedGraph.
592  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
593
594  // Account for liveness generated by the region boundary.
595  if (LiveRegionEnd != RegionEnd)
596    RPTracker.recede();
597
598  // Build the DAG, and compute current register pressure.
599  buildSchedGraph(AA, &RPTracker);
600
601  // Initialize top/bottom trackers after computing region pressure.
602  initRegPressure();
603
604  DEBUG(dbgs() << "********** MI Scheduling **********\n");
605  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
606          SUnits[su].dumpAll(this));
607
608  if (ViewMISchedDAGs) viewGraph();
609
610  SchedImpl->initialize(this);
611
612  // Release edges from the special Entry node or to the special Exit node.
613  releaseSuccessors(&EntrySU);
614  releasePredecessors(&ExitSU);
615
616  // Release all DAG roots for scheduling.
617  releaseRoots();
618
619  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
620  CurrentBottom = RegionEnd;
621  bool IsTopNode = false;
622  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
623    DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
624          << " Scheduling Instruction");
625    if (!checkSchedLimit())
626      break;
627
628    // Move the instruction to its new location in the instruction stream.
629    MachineInstr *MI = SU->getInstr();
630
631    if (IsTopNode) {
632      assert(SU->isTopReady() && "node still has unscheduled dependencies");
633      if (&*CurrentTop == MI)
634        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
635      else {
636        moveInstruction(MI, CurrentTop);
637        TopRPTracker.setPos(MI);
638      }
639
640      // Update top scheduled pressure.
641      TopRPTracker.advance();
642      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
643      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
644
645      // Release dependent instructions for scheduling.
646      releaseSuccessors(SU);
647    }
648    else {
649      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
650      MachineBasicBlock::iterator priorII =
651        priorNonDebug(CurrentBottom, CurrentTop);
652      if (&*priorII == MI)
653        CurrentBottom = priorII;
654      else {
655        if (&*CurrentTop == MI) {
656          CurrentTop = nextIfDebug(++CurrentTop, priorII);
657          TopRPTracker.setPos(CurrentTop);
658        }
659        moveInstruction(MI, CurrentBottom);
660        CurrentBottom = MI;
661      }
662      // Update bottom scheduled pressure.
663      BotRPTracker.recede();
664      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
665      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
666
667      // Release dependent instructions for scheduling.
668      releasePredecessors(SU);
669    }
670    SU->isScheduled = true;
671    SchedImpl->schedNode(SU, IsTopNode);
672    DEBUG(SU->dump(this));
673  }
674  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
675
676  placeDebugValues();
677}
678
679/// Reinsert any remaining debug_values, just like the PostRA scheduler.
680void ScheduleDAGMI::placeDebugValues() {
681  // If first instruction was a DBG_VALUE then put it back.
682  if (FirstDbgValue) {
683    BB->splice(RegionBegin, BB, FirstDbgValue);
684    RegionBegin = FirstDbgValue;
685  }
686
687  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
688         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
689    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
690    MachineInstr *DbgValue = P.first;
691    MachineBasicBlock::iterator OrigPrevMI = P.second;
692    BB->splice(++OrigPrevMI, BB, DbgValue);
693    if (OrigPrevMI == llvm::prior(RegionEnd))
694      RegionEnd = DbgValue;
695  }
696  DbgValues.clear();
697  FirstDbgValue = NULL;
698}
699
700//===----------------------------------------------------------------------===//
701// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
702//===----------------------------------------------------------------------===//
703
704namespace {
705/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
706/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
707/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues that the SUnit is in.
708class ReadyQueue {
709  unsigned ID;
710  std::string Name;
711  std::vector<SUnit*> Queue;
712
713public:
714  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
715
716  unsigned getID() const { return ID; }
717
718  StringRef getName() const { return Name; }
719
720  // SU is in this queue if its NodeQueueId is a superset of this ID.
721  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
722
723  bool empty() const { return Queue.empty(); }
724
725  unsigned size() const { return Queue.size(); }
726
727  typedef std::vector<SUnit*>::iterator iterator;
728
729  iterator begin() { return Queue.begin(); }
730
731  iterator end() { return Queue.end(); }
732
733  iterator find(SUnit *SU) {
734    return std::find(Queue.begin(), Queue.end(), SU);
735  }
736
737  void push(SUnit *SU) {
738    Queue.push_back(SU);
739    SU->NodeQueueId |= ID;
740  }
741
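  // Remove the queue element at I by overwriting it with the last element and
  // popping the back; queue order is not preserved.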
742  void remove(iterator I) {
743    (*I)->NodeQueueId &= ~ID;
744    *I = Queue.back();
745    Queue.pop_back();
746  }
747
748  void dump() {
749    dbgs() << Name << ": ";
750    for (unsigned i = 0, e = Queue.size(); i < e; ++i)
751      dbgs() << Queue[i]->NodeNum << " ";
752    dbgs() << "\n";
753  }
754};
755
756/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
757/// the schedule.
758class ConvergingScheduler : public MachineSchedStrategy {
759
760  /// Store the state used by ConvergingScheduler heuristics, required for the
761  /// lifetime of one invocation of pickNode().
762  struct SchedCandidate {
763    // The best SUnit candidate.
764    SUnit *SU;
765
766    // Register pressure values for the best candidate.
767    RegPressureDelta RPDelta;
768
769    SchedCandidate(): SU(NULL) {}
770  };
771  /// Represent the type of SchedCandidate found within a single queue.
772  enum CandResult {
773    NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };
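  // A Single* result indicates the pick was decided by that pressure tier
  // alone; MultiPressure indicates a tie was seen on the previously deciding
  // tier, so the choice fell through to lower-priority criteria.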
774
775  /// Each scheduling boundary is associated with ready queues. It tracks the
776  /// current cycle in whichever direction it has moved, and maintains the state
777  /// of "hazards" and other interlocks at the current cycle.
778  struct SchedBoundary {
779    ReadyQueue Available;
780    ReadyQueue Pending;
781    bool CheckPending;
782
783    ScheduleHazardRecognizer *HazardRec;
784
785    unsigned CurrCycle;
786    unsigned IssueCount;
787
788    /// MinReadyCycle - Cycle of the soonest available instruction.
789    unsigned MinReadyCycle;
790
791    /// Pending queues extend the ready queues with the same ID and the
792    /// PendingFlag set.
793    SchedBoundary(unsigned ID, const Twine &Name):
794      Available(ID, Name+".A"),
795      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
796      CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
797      MinReadyCycle(UINT_MAX) {}
798
799    ~SchedBoundary() { delete HazardRec; }
800
801    bool isTop() const {
802      return Available.getID() == ConvergingScheduler::TopQID;
803    }
804
805    void releaseNode(SUnit *SU, unsigned ReadyCycle);
806
807    void bumpCycle();
808
809    void releasePending();
810
811    void removeReady(SUnit *SU);
812
813    SUnit *pickOnlyChoice();
814  };
815
816  ScheduleDAGMI *DAG;
817  const TargetRegisterInfo *TRI;
818
819  // State of the top and bottom scheduled instruction boundaries.
820  SchedBoundary Top;
821  SchedBoundary Bot;
822
823public:
824  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
825  enum {
826    TopQID = 1,
827    BotQID = 2,
828    LogMaxQID = 2
829  };
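  // With LogMaxQID == 2, the pending queues constructed in SchedBoundary get
  // IDs TopQID << 2 == 4 and BotQID << 2 == 8, so all four queues occupy
  // disjoint bits in SUnit::NodeQueueId.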
830
831  ConvergingScheduler():
832    DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
833
834  virtual void initialize(ScheduleDAGMI *dag);
835
836  virtual SUnit *pickNode(bool &IsTopNode);
837
838  virtual void schedNode(SUnit *SU, bool IsTopNode);
839
840  virtual void releaseTopNode(SUnit *SU);
841
842  virtual void releaseBottomNode(SUnit *SU);
843
844protected:
845  SUnit *pickNodeBidirectional(bool &IsTopNode);
846
847  CandResult pickNodeFromQueue(ReadyQueue &Q,
848                               const RegPressureTracker &RPTracker,
849                               SchedCandidate &Candidate);
850#ifndef NDEBUG
851  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
852                      PressureElement P = PressureElement());
853#endif
854};
855} // namespace
856
857void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
858  DAG = dag;
859  TRI = DAG->TRI;
860
861  // Initialize the HazardRecognizers.
862  const TargetMachine &TM = DAG->MF.getTarget();
863  const InstrItineraryData *Itin = TM.getInstrItineraryData();
864  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
865  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
866
867  assert((!ForceTopDown || !ForceBottomUp) &&
868         "-misched-topdown incompatible with -misched-bottomup");
869}
870
871void ConvergingScheduler::releaseTopNode(SUnit *SU) {
872  Top.releaseNode(SU, SU->getDepth());
873}
874
875void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
876  Bot.releaseNode(SU, SU->getHeight());
877}
878
879void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
880                                                     unsigned ReadyCycle) {
881  if (SU->isScheduled)
882    return;
883
884  if (ReadyCycle < MinReadyCycle)
885    MinReadyCycle = ReadyCycle;
886
887  // Check for interlocks first. For the purpose of other heuristics, an
888  // instruction that cannot issue appears as if it's not in the ReadyQueue.
889  if (HazardRec->isEnabled()
890      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard)
891    Pending.push(SU);
892  else
893    Available.push(SU);
894}
895
896/// Move the boundary of scheduled code by one cycle.
897void ConvergingScheduler::SchedBoundary::bumpCycle() {
898  IssueCount = 0;
899
900  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
901  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
902
903  if (!HazardRec->isEnabled()) {
904    // Bypass lots of virtual calls in case of long latency.
905    CurrCycle = NextCycle;
906  }
907  else {
908    for (; CurrCycle != NextCycle; ++CurrCycle) {
909      if (isTop())
910        HazardRec->AdvanceCycle();
911      else
912        HazardRec->RecedeCycle();
913    }
914  }
915  CheckPending = true;
916
917  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
918        << CurrCycle << '\n');
919}
920
921/// Release pending ready nodes into the available queue. This makes them
922/// visible to heuristics.
923void ConvergingScheduler::SchedBoundary::releasePending() {
924  // If the available queue is empty, it is safe to reset MinReadyCycle.
925  if (Available.empty())
926    MinReadyCycle = UINT_MAX;
927
928  // Check to see if any of the pending instructions are ready to issue.  If
929  // so, add them to the available queue.
930  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
931    SUnit *SU = *(Pending.begin()+i);
932    unsigned ReadyCycle = isTop() ? SU->getHeight() : SU->getDepth();
933
934    if (ReadyCycle < MinReadyCycle)
935      MinReadyCycle = ReadyCycle;
936
937    if (ReadyCycle > CurrCycle)
938      continue;
939
940    if (HazardRec->isEnabled()
941        && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard)
942      continue;
943
944    Available.push(SU);
945    Pending.remove(Pending.begin()+i);
946    --i; --e;
947  }
948  CheckPending = false;
949}
950
951/// Remove SU from the ready set for this boundary.
952void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
953  if (Available.isInQueue(SU))
954    Available.remove(Available.find(SU));
955  else {
956    assert(Pending.isInQueue(SU) && "bad ready count");
957    Pending.remove(Pending.find(SU));
958  }
959}
960
961/// If this queue only has one ready candidate, return it. As a side effect,
962/// advance the cycle until at least one node is ready. If multiple instructions
963/// are ready, return NULL.
964SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
965  if (CheckPending)
966    releasePending();
967
968  for (unsigned i = 0; Available.empty(); ++i) {
969    assert(i <= HazardRec->getMaxLookAhead() && "permanent hazard"); (void)i;
970    bumpCycle();
971    releasePending();
972  }
973  if (Available.size() == 1)
974    return *Available.begin();
975  return NULL;
976}
977
978#ifndef NDEBUG
979void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
980                                         SUnit *SU, PressureElement P) {
981  dbgs() << Label << " " << Q.getName() << " ";
982  if (P.isValid())
983    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
984           << " ";
985  else
986    dbgs() << "     ";
987  SU->dump(DAG);
988}
989#endif
990
991/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
992/// more desirable than the RHS from a scheduling standpoint.
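/// For example, a delta of {Excess: +0, CriticalMax: +1} compares as more
/// desirable than one of {Excess: +1, CriticalMax: +0}, regardless of their
/// CurrentMax components.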
993static bool compareRPDelta(const RegPressureDelta &LHS,
994                           const RegPressureDelta &RHS) {
995  // Compare each component of pressure in decreasing order of importance
996  // without checking if any are valid. Invalid PressureElements are assumed to
997  // have UnitIncrease==0, so are neutral.
998
999  // Avoid exceeding the target's limit.
1000  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
1001    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
1002
1003  // Avoid increasing the max critical pressure in the scheduled region.
1004  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
1005    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
1006
1007  // Avoid increasing the max pressure of the entire region.
1008  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
1009    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
1010
1011  return false;
1012}
1013
1014/// Pick the best candidate from the top queue.
1015///
1016/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
1017/// DAG building. To adjust for the current scheduling location we need to
1018/// maintain the number of vreg uses remaining to be top-scheduled.
1019ConvergingScheduler::CandResult ConvergingScheduler::
1020pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
1021                  SchedCandidate &Candidate) {
1022  DEBUG(Q.dump());
1023
1024  // getMaxPressureDelta temporarily modifies the tracker.
1025  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
1026
1027  // Candidate.SU remains NULL if no candidates beat the best existing candidate.
1028  CandResult FoundCandidate = NoCand;
1029  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
1030    RegPressureDelta RPDelta;
1031    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
1032                                    DAG->getRegionCriticalPSets(),
1033                                    DAG->getRegPressure().MaxSetPressure);
1034
1035    // Initialize the candidate if needed.
1036    if (!Candidate.SU) {
1037      Candidate.SU = *I;
1038      Candidate.RPDelta = RPDelta;
1039      FoundCandidate = NodeOrder;
1040      continue;
1041    }
1042    // Avoid exceeding the target's limit.
1043    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
1044      DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
1045      Candidate.SU = *I;
1046      Candidate.RPDelta = RPDelta;
1047      FoundCandidate = SingleExcess;
1048      continue;
1049    }
1050    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
1051      continue;
1052    if (FoundCandidate == SingleExcess)
1053      FoundCandidate = MultiPressure;
1054
1055    // Avoid increasing the max critical pressure in the scheduled region.
1056    if (RPDelta.CriticalMax.UnitIncrease
1057        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
1058      DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
1059      Candidate.SU = *I;
1060      Candidate.RPDelta = RPDelta;
1061      FoundCandidate = SingleCritical;
1062      continue;
1063    }
1064    if (RPDelta.CriticalMax.UnitIncrease
1065        > Candidate.RPDelta.CriticalMax.UnitIncrease)
1066      continue;
1067    if (FoundCandidate == SingleCritical)
1068      FoundCandidate = MultiPressure;
1069
1070    // Avoid increasing the max pressure of the entire region.
1071    if (RPDelta.CurrentMax.UnitIncrease
1072        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
1073      DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
1074      Candidate.SU = *I;
1075      Candidate.RPDelta = RPDelta;
1076      FoundCandidate = SingleMax;
1077      continue;
1078    }
1079    if (RPDelta.CurrentMax.UnitIncrease
1080        > Candidate.RPDelta.CurrentMax.UnitIncrease)
1081      continue;
1082    if (FoundCandidate == SingleMax)
1083      FoundCandidate = MultiPressure;
1084
1085    // Fall through to original instruction order.
1086    // Only consider node order if Candidate was chosen from this Q.
1087    if (FoundCandidate == NoCand)
1088      continue;
1089
1090    if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
1091        || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
1092      DEBUG(traceCandidate("NCAND", Q, *I));
1093      Candidate.SU = *I;
1094      Candidate.RPDelta = RPDelta;
1095      FoundCandidate = NodeOrder;
1096    }
1097  }
1098  return FoundCandidate;
1099}
1100
1101/// Pick the best candidate node from either the top or bottom queue.
1102SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
1103  // Schedule as far as possible in the direction of no choice. This is most
1104  // efficient, but also provides the best heuristics for CriticalPSets.
1105  if (SUnit *SU = Bot.pickOnlyChoice()) {
1106    IsTopNode = false;
1107    return SU;
1108  }
1109  if (SUnit *SU = Top.pickOnlyChoice()) {
1110    IsTopNode = true;
1111    return SU;
1112  }
1113  SchedCandidate BotCand;
1114  // Prefer bottom scheduling when heuristics are silent.
1115  CandResult BotResult = pickNodeFromQueue(Bot.Available,
1116                                           DAG->getBotRPTracker(), BotCand);
1117  assert(BotResult != NoCand && "failed to find the first candidate");
1118
1119  // If either Q has a single candidate that provides the least increase in
1120  // Excess pressure, we can immediately schedule from that Q.
1121  //
1122  // RegionCriticalPSets summarizes the pressure within the scheduled region and
1123  // affects picking from either Q. If scheduling in one direction must
1124  // increase pressure for one of the excess PSets, then schedule in that
1125  // direction first to provide more freedom in the other direction.
1126  if (BotResult == SingleExcess || BotResult == SingleCritical) {
1127    IsTopNode = false;
1128    return BotCand.SU;
1129  }
1130  // Check if the top Q has a better candidate.
1131  SchedCandidate TopCand;
1132  CandResult TopResult = pickNodeFromQueue(Top.Available,
1133                                           DAG->getTopRPTracker(), TopCand);
1134  assert(TopResult != NoCand && "failed to find the first candidate");
1135
1136  if (TopResult == SingleExcess || TopResult == SingleCritical) {
1137    IsTopNode = true;
1138    return TopCand.SU;
1139  }
1140  // If either Q has a single candidate that minimizes pressure above the
1141  // original region's pressure, pick it.
1142  if (BotResult == SingleMax) {
1143    IsTopNode = false;
1144    return BotCand.SU;
1145  }
1146  if (TopResult == SingleMax) {
1147    IsTopNode = true;
1148    return TopCand.SU;
1149  }
1150  // Check for a salient pressure difference and pick the best from either side.
1151  if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
1152    IsTopNode = true;
1153    return TopCand.SU;
1154  }
1155  // Otherwise prefer the bottom candidate in node order.
1156  IsTopNode = false;
1157  return BotCand.SU;
1158}
1159
1160/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
1161SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
1162  if (DAG->top() == DAG->bottom()) {
1163    assert(Top.Available.empty() && Top.Pending.empty() &&
1164           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
1165    return NULL;
1166  }
1167  SUnit *SU;
1168  if (ForceTopDown) {
1169    SU = DAG->getSUnit(DAG->top());
1170    IsTopNode = true;
1171  }
1172  else if (ForceBottomUp) {
1173    SU = DAG->getSUnit(priorNonDebug(DAG->bottom(), DAG->top()));
1174    IsTopNode = false;
1175  }
1176  else {
1177    SU = pickNodeBidirectional(IsTopNode);
1178  }
1179  if (SU->isTopReady())
1180    Top.removeReady(SU);
1181  if (SU->isBottomReady())
1182    Bot.removeReady(SU);
1183  return SU;
1184}
1185
1186/// Update the scheduler's state after scheduling a node. This is the same node
1187/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
1188/// its state based on the current cycle before MachineSchedStrategy does.
1189void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
1190  DEBUG(dbgs() << " in cycle " << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle)
1191        << '\n');
1192
1193  // Update the reservation table.
1194  if (IsTopNode && Top.HazardRec->isEnabled()) {
1195    Top.HazardRec->EmitInstruction(SU);
1196    if (Top.HazardRec->atIssueLimit()) {
1197      DEBUG(dbgs() << "*** Max instrs at cycle " << Top.CurrCycle << '\n');
1198      Top.bumpCycle();
1199    }
1200  }
1201  else if (Bot.HazardRec->isEnabled()) {
1202    if (SU->isCall) {
1203      // Calls are scheduled with their preceding instructions. For bottom-up
1204      // scheduling, clear the pipeline state before emitting.
1205      Bot.HazardRec->Reset();
1206    }
1207    Bot.HazardRec->EmitInstruction(SU);
1208    if (Bot.HazardRec->atIssueLimit()) {
1209      DEBUG(dbgs() << "*** Max instrs at cycle " << Bot.CurrCycle << '\n');
1210      Bot.bumpCycle();
1211    }
1212  }
1213}
1214
1215/// Create the standard converging machine scheduler. This will be used as the
1216/// default scheduler if the target does not set a default.
1217static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
1218  assert((!ForceTopDown || !ForceBottomUp) &&
1219         "-misched-topdown incompatible with -misched-bottomup");
1220  return new ScheduleDAGMI(C, new ConvergingScheduler());
1221}
1222static MachineSchedRegistry
1223ConvergingSchedRegistry("converge", "Standard converging scheduler.",
1224                        createConvergingSched);
1225
1226//===----------------------------------------------------------------------===//
1227// Machine Instruction Shuffler for Correctness Testing
1228//===----------------------------------------------------------------------===//
1229
1230#ifndef NDEBUG
1231namespace {
1232/// Apply a less-than relation on the node order, which corresponds to the
1233/// instruction order prior to scheduling. IsReverse implements greater-than.
1234template<bool IsReverse>
1235struct SUnitOrder {
1236  bool operator()(SUnit *A, SUnit *B) const {
1237    if (IsReverse)
1238      return A->NodeNum > B->NodeNum;
1239    else
1240      return A->NodeNum < B->NodeNum;
1241  }
1242};
1243
1244/// Reorder instructions as much as possible.
1245class InstructionShuffler : public MachineSchedStrategy {
1246  bool IsAlternating;
1247  bool IsTopDown;
1248
1249  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
1250  // gives nodes with a higher number higher priority, causing the latest
1251  // instructions to be scheduled first.
1252  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
1253    TopQ;
1254  // When scheduling bottom-up, use greater-than as the queue priority.
1255  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
1256    BottomQ;
1257public:
1258  InstructionShuffler(bool alternate, bool topdown)
1259    : IsAlternating(alternate), IsTopDown(topdown) {}
1260
1261  virtual void initialize(ScheduleDAGMI *) {
1262    TopQ.clear();
1263    BottomQ.clear();
1264  }
1265
1266  /// Implement MachineSchedStrategy interface.
1267  /// -----------------------------------------
1268
1269  virtual SUnit *pickNode(bool &IsTopNode) {
1270    SUnit *SU;
1271    if (IsTopDown) {
1272      do {
1273        if (TopQ.empty()) return NULL;
1274        SU = TopQ.top();
1275        TopQ.pop();
1276      } while (SU->isScheduled);
1277      IsTopNode = true;
1278    }
1279    else {
1280      do {
1281        if (BottomQ.empty()) return NULL;
1282        SU = BottomQ.top();
1283        BottomQ.pop();
1284      } while (SU->isScheduled);
1285      IsTopNode = false;
1286    }
1287    if (IsAlternating)
1288      IsTopDown = !IsTopDown;
1289    return SU;
1290  }
1291
1292  virtual void schedNode(SUnit *SU, bool IsTopNode) {}
1293
1294  virtual void releaseTopNode(SUnit *SU) {
1295    TopQ.push(SU);
1296  }
1297  virtual void releaseBottomNode(SUnit *SU) {
1298    BottomQ.push(SU);
1299  }
1300};
1301} // namespace
1302
1303static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
1304  bool Alternate = !ForceTopDown && !ForceBottomUp;
1305  bool TopDown = !ForceBottomUp;
1306  assert((TopDown || !ForceTopDown) &&
1307         "-misched-topdown incompatible with -misched-bottomup");
1308  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
1309}
1310static MachineSchedRegistry ShufflerRegistry(
1311  "shuffle", "Shuffle machine instructions alternating directions",
1312  createInstructionShuffler);
1313#endif // !NDEBUG
1314