MachineScheduler.cpp revision c7a098fbb23f7f6cfbbbfc097b22c10cf4211ab6
1//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// MachineScheduler schedules machine instructions after phi elimination. It
11// preserves LiveIntervals so it can be invoked before register allocation.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "misched"
16
17#include "RegisterClassInfo.h"
18#include "RegisterPressure.h"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "llvm/CodeGen/MachineScheduler.h"
21#include "llvm/CodeGen/Passes.h"
22#include "llvm/CodeGen/ScheduleDAGInstrs.h"
23#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/Target/TargetInstrInfo.h"
26#include "llvm/Support/CommandLine.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Support/raw_ostream.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/PriorityQueue.h"
32
33#include <queue>
34
35using namespace llvm;
36
37static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
38                                  cl::desc("Force top-down list scheduling"));
39static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
40                                  cl::desc("Force bottom-up list scheduling"));
41
42#ifndef NDEBUG
43static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
44  cl::desc("Pop up a window to show MISched dags after they are processed"));
45
46static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
47  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
48#else
49static bool ViewMISchedDAGs = false;
50#endif // NDEBUG
51
52//===----------------------------------------------------------------------===//
53// Machine Instruction Scheduling Pass and Registry
54//===----------------------------------------------------------------------===//
55
56MachineSchedContext::MachineSchedContext():
57    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
58  RegClassInfo = new RegisterClassInfo();
59}
60
61MachineSchedContext::~MachineSchedContext() {
62  delete RegClassInfo;
63}
64
65namespace {
66/// MachineScheduler runs after coalescing and before register allocation.
67class MachineScheduler : public MachineSchedContext,
68                         public MachineFunctionPass {
69public:
70  MachineScheduler();
71
72  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
73
74  virtual void releaseMemory() {}
75
76  virtual bool runOnMachineFunction(MachineFunction&);
77
78  virtual void print(raw_ostream &O, const Module* = 0) const;
79
80  static char ID; // Class identification, replacement for typeinfo
81};
82} // namespace
83
84char MachineScheduler::ID = 0;
85
86char &llvm::MachineSchedulerID = MachineScheduler::ID;
87
88INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
89                      "Machine Instruction Scheduler", false, false)
90INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
91INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
92INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
93INITIALIZE_PASS_END(MachineScheduler, "misched",
94                    "Machine Instruction Scheduler", false, false)
95
96MachineScheduler::MachineScheduler()
97: MachineFunctionPass(ID) {
98  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
99}
100
101void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
102  AU.setPreservesCFG();
103  AU.addRequiredID(MachineDominatorsID);
104  AU.addRequired<MachineLoopInfo>();
105  AU.addRequired<AliasAnalysis>();
106  AU.addRequired<TargetPassConfig>();
107  AU.addRequired<SlotIndexes>();
108  AU.addPreserved<SlotIndexes>();
109  AU.addRequired<LiveIntervals>();
110  AU.addPreserved<LiveIntervals>();
111  MachineFunctionPass::getAnalysisUsage(AU);
112}
113
114MachinePassRegistry MachineSchedRegistry::Registry;
115
116/// A dummy default scheduler factory indicates whether the scheduler
117/// is overridden on the command line.
118static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
119  return 0;
120}
121
122/// MachineSchedOpt allows command line selection of the scheduler.
123static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
124               RegisterPassParser<MachineSchedRegistry> >
125MachineSchedOpt("misched",
126                cl::init(&useDefaultMachineSched), cl::Hidden,
127                cl::desc("Machine instruction scheduler to use"));
128
129static MachineSchedRegistry
130DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
131                     useDefaultMachineSched);
132
133/// Forward declare the standard machine scheduler. This will be used as the
134/// default scheduler if the target does not set a default.
135static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
136
137
138/// Decrement this iterator until reaching the top or a non-debug instr.
139static MachineBasicBlock::iterator
140priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
141  assert(I != Beg && "reached the top of the region, cannot decrement");
142  while (--I != Beg) {
143    if (!I->isDebugValue())
144      break;
145  }
146  return I;
147}
148
149/// If this iterator is a debug value, increment until reaching the End or a
150/// non-debug instruction.
151static MachineBasicBlock::iterator
152nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
153  for (; I != End; ++I) {
154    if (!I->isDebugValue())
155      break;
156  }
157  return I;
158}
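// Both helpers are used below to keep CurrentTop/CurrentBottom and the
// register pressure trackers positioned on real instructions, so that stray
// DBG_VALUEs inside a region cannot perturb scheduling decisions.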
159
160/// Top-level MachineScheduler pass driver.
161///
162/// Visit blocks in function order. Divide each block into scheduling regions
163/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
164/// consistent with the DAG builder, which traverses the interior of the
165/// scheduling regions bottom-up.
166///
167/// This design avoids exposing scheduling boundaries to the DAG builder,
168/// simplifying the DAG builder's support for "special" target instructions.
169/// At the same time the design allows target schedulers to operate across
170/// scheduling boundaries, for example to bundle the boundary instructions
171/// without reordering them. This creates complexity, because the target
172/// scheduler must update the RegionBegin and RegionEnd positions cached by
173/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
174/// design would be to split blocks at scheduling boundaries, but LLVM has a
175/// general bias against block splitting purely for implementation simplicity.
176bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
177  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));
178
179  // Initialize the context of the pass.
180  MF = &mf;
181  MLI = &getAnalysis<MachineLoopInfo>();
182  MDT = &getAnalysis<MachineDominatorTree>();
183  PassConfig = &getAnalysis<TargetPassConfig>();
184  AA = &getAnalysis<AliasAnalysis>();
185
186  LIS = &getAnalysis<LiveIntervals>();
187  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
188
189  RegClassInfo->runOnMachineFunction(*MF);
190
191  // Select the scheduler, or set the default.
192  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
193  if (Ctor == useDefaultMachineSched) {
194    // Get the default scheduler set by the target.
195    Ctor = MachineSchedRegistry::getDefault();
196    if (!Ctor) {
197      Ctor = createConvergingSched;
198      MachineSchedRegistry::setDefault(Ctor);
199    }
200  }
201  // Instantiate the selected scheduler.
202  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
203
204  // Visit all machine basic blocks.
205  //
206  // TODO: Visit blocks in global postorder or postorder within the bottom-up
207  // loop tree. Then we can optionally compute global RegPressure.
208  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
209       MBB != MBBEnd; ++MBB) {
210
211    Scheduler->startBlock(MBB);
212
213    // Break the block into scheduling regions [I, RegionEnd), and schedule each
214    // region as soon as it is discovered. RegionEnd points to the scheduling
215    // boundary at the bottom of the region. The DAG does not include RegionEnd,
216    // but the region does (i.e. the next RegionEnd is above the previous
217    // RegionBegin). If the current block has no terminator then RegionEnd ==
218    // MBB->end() for the bottom region.
219    //
220    // The Scheduler may insert instructions during either schedule() or
221    // exitRegion(), even for empty regions. So the local iterators 'I' and
222    // 'RegionEnd' are invalid across these calls.
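    //
    // For illustration (a hypothetical block, not taken from any test): given
    // <i0, i1, BOUNDARY, i2, i3, TERM>, where BOUNDARY and TERM satisfy
    // isSchedulingBoundary(), the loop below first forms the region [i2, TERM)
    // and schedules {i2, i3}, then forms [i0, BOUNDARY) and schedules
    // {i0, i1}. The boundary instructions themselves are never reordered; they
    // only delimit regions and are accounted for in RemainingCount.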
223    unsigned RemainingCount = MBB->size();
224    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
225        RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
226
227      // Avoid decrementing RegionEnd for blocks with no terminator.
228      if (RegionEnd != MBB->end()
229          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
230        --RegionEnd;
231        // Count the boundary instruction.
232        --RemainingCount;
233      }
234
235      // The next region starts above the previous region. Look backward in the
236      // instruction stream until we find the nearest boundary.
237      MachineBasicBlock::iterator I = RegionEnd;
238      for (; I != MBB->begin(); --I, --RemainingCount) {
239        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
240          break;
241      }
242      // Notify the scheduler of the region, even if we may skip scheduling
243      // it. Perhaps it still needs to be bundled.
244      Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);
245
246      // Skip empty scheduling regions (0 or 1 schedulable instructions).
247      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
248        // Close the current region. Bundle the terminator if needed.
249        // This invalidates 'RegionEnd' and 'I'.
250        Scheduler->exitRegion();
251        continue;
252      }
253      DEBUG(dbgs() << "********** MI Scheduling **********\n");
254      DEBUG(dbgs() << MF->getFunction()->getName()
255            << ":BB#" << MBB->getNumber() << "\n  From: " << *I << "    To: ";
256            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
257            else dbgs() << "End";
258            dbgs() << " Remaining: " << RemainingCount << "\n");
259
260      // Schedule a region: possibly reorder instructions.
261      // This invalidates 'RegionEnd' and 'I'.
262      Scheduler->schedule();
263
264      // Close the current region.
265      Scheduler->exitRegion();
266
267      // Scheduling has invalidated the current iterator 'I'. Ask the
268      // scheduler for the top of its scheduled region.
269      RegionEnd = Scheduler->begin();
270    }
271    assert(RemainingCount == 0 && "Instruction count mismatch!");
272    Scheduler->finishBlock();
273  }
274  Scheduler->finalizeSchedule();
275  DEBUG(LIS->print(dbgs()));
276  return true;
277}
278
279void MachineScheduler::print(raw_ostream &O, const Module* m) const {
280  // unimplemented
281}
282
283//===----------------------------------------------------------------------===//
284// MachineSchedStrategy - Interface to a machine scheduling algorithm.
285//===----------------------------------------------------------------------===//
286
287namespace {
288class ScheduleDAGMI;
289
290/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
291/// scheduling algorithm.
292///
293/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
294/// in ScheduleDAGInstrs.h
295class MachineSchedStrategy {
296public:
297  virtual ~MachineSchedStrategy() {}
298
299  /// Initialize the strategy after building the DAG for a new region.
300  virtual void initialize(ScheduleDAGMI *DAG) = 0;
301
302  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
303  /// schedule the node at the top of the unscheduled region. Otherwise it will
304  /// be scheduled at the bottom.
305  virtual SUnit *pickNode(bool &IsTopNode) = 0;
306
307  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node.
308  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
309
310  /// When all predecessor dependencies have been resolved, free this node for
311  /// top-down scheduling.
312  virtual void releaseTopNode(SUnit *SU) = 0;
313  /// When all successor dependencies have been resolved, free this node for
314  /// bottom-up scheduling.
315  virtual void releaseBottomNode(SUnit *SU) = 0;
316};
317} // namespace
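// To show how these hooks fit together, here is a minimal strategy sketch
// (hypothetical, not part of this file; the in-tree InstructionShuffler near
// the end of this file is the real reference). It schedules purely top-down,
// picking released nodes in LIFO order:
//
//   class TrivialTopDownStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> ReadyTop;
//   public:
//     virtual void initialize(ScheduleDAGMI *DAG) { ReadyTop.clear(); }
//     virtual SUnit *pickNode(bool &IsTopNode) {
//       IsTopNode = true;
//       if (ReadyTop.empty()) return NULL; // tells ScheduleDAGMI to stop
//       SUnit *SU = ReadyTop.back();
//       ReadyTop.pop_back();
//       return SU;
//     }
//     virtual void schedNode(SUnit *SU, bool IsTopNode) {}
//     virtual void releaseTopNode(SUnit *SU) { ReadyTop.push_back(SU); }
//     virtual void releaseBottomNode(SUnit *SU) {} // bottom-up unused here
//   };
//
// ScheduleDAGMI::schedule() calls releaseTopNode() as predecessors finish and
// keeps calling pickNode() until it returns NULL.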
318
319//===----------------------------------------------------------------------===//
320// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
321// preservation.
322//===----------------------------------------------------------------------===//
323
324namespace {
325/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
326/// machine instructions while updating LiveIntervals.
327class ScheduleDAGMI : public ScheduleDAGInstrs {
328  AliasAnalysis *AA;
329  RegisterClassInfo *RegClassInfo;
330  MachineSchedStrategy *SchedImpl;
331
332  MachineBasicBlock::iterator LiveRegionEnd;
333
334  /// Register pressure in this region computed by buildSchedGraph.
335  IntervalPressure RegPressure;
336  RegPressureTracker RPTracker;
337
338  /// List of pressure sets that exceed the target's pressure limit before
339  /// scheduling, listed in increasing set ID order. Each pressure set is paired
340  /// with its max pressure in the currently scheduled regions.
341  std::vector<PressureElement> RegionCriticalPSets;
342
343  /// The top of the unscheduled zone.
344  MachineBasicBlock::iterator CurrentTop;
345  IntervalPressure TopPressure;
346  RegPressureTracker TopRPTracker;
347
348  /// The bottom of the unscheduled zone.
349  MachineBasicBlock::iterator CurrentBottom;
350  IntervalPressure BotPressure;
351  RegPressureTracker BotRPTracker;
352
353  /// The number of instructions scheduled so far. Used to cut off the
354  /// scheduler at the point determined by misched-cutoff.
355  unsigned NumInstrsScheduled;
356public:
357  ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
358    ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
359    AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S),
360    RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure),
361    CurrentBottom(), BotRPTracker(BotPressure), NumInstrsScheduled(0) {}
362
363  ~ScheduleDAGMI() {
364    delete SchedImpl;
365  }
366
367  MachineBasicBlock::iterator top() const { return CurrentTop; }
368  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
369
370  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
371  /// region. This covers all instructions in a block, while schedule() may only
372  /// cover a subset.
373  void enterRegion(MachineBasicBlock *bb,
374                   MachineBasicBlock::iterator begin,
375                   MachineBasicBlock::iterator end,
376                   unsigned endcount);
377
378  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
379  /// reorderable instructions.
380  void schedule();
381
382  /// Get current register pressure for the top scheduled instructions.
383  const IntervalPressure &getTopPressure() const { return TopPressure; }
384  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
385
386  /// Get current register pressure for the bottom scheduled instructions.
387  const IntervalPressure &getBotPressure() const { return BotPressure; }
388  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
389
390  /// Get register pressure for the entire scheduling region before scheduling.
391  const IntervalPressure &getRegPressure() const { return RegPressure; }
392
393  const std::vector<PressureElement> &getRegionCriticalPSets() const {
394    return RegionCriticalPSets;
395  }
396
397protected:
398  void initRegPressure();
399  void updateScheduledPressure(std::vector<unsigned> NewMaxPressure);
400
401  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
402  bool checkSchedLimit();
403
404  void releaseRoots();
405
406  void releaseSucc(SUnit *SU, SDep *SuccEdge);
407  void releaseSuccessors(SUnit *SU);
408  void releasePred(SUnit *SU, SDep *PredEdge);
409  void releasePredecessors(SUnit *SU);
410
411  void placeDebugValues();
412};
413} // namespace
414
415/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
416/// NumPredsLeft reaches zero, release the successor node.
417///
418/// FIXME: Adjust SuccSU height based on MinLatency.
419void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
420  SUnit *SuccSU = SuccEdge->getSUnit();
421
422#ifndef NDEBUG
423  if (SuccSU->NumPredsLeft == 0) {
424    dbgs() << "*** Scheduling failed! ***\n";
425    SuccSU->dump(this);
426    dbgs() << " has been released too many times!\n";
427    llvm_unreachable(0);
428  }
429#endif
430  --SuccSU->NumPredsLeft;
431  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
432    SchedImpl->releaseTopNode(SuccSU);
433}
434
435/// releaseSuccessors - Call releaseSucc on each of SU's successors.
436void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
437  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
438       I != E; ++I) {
439    releaseSucc(SU, &*I);
440  }
441}
442
443/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
444/// NumSuccsLeft reaches zero, release the predecessor node.
445///
446/// FIXME: Adjust PredSU height based on MinLatency.
447void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
448  SUnit *PredSU = PredEdge->getSUnit();
449
450#ifndef NDEBUG
451  if (PredSU->NumSuccsLeft == 0) {
452    dbgs() << "*** Scheduling failed! ***\n";
453    PredSU->dump(this);
454    dbgs() << " has been released too many times!\n";
455    llvm_unreachable(0);
456  }
457#endif
458  --PredSU->NumSuccsLeft;
459  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
460    SchedImpl->releaseBottomNode(PredSU);
461}
462
463/// releasePredecessors - Call releasePred on each of SU's predecessors.
464void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
465  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
466       I != E; ++I) {
467    releasePred(SU, &*I);
468  }
469}
470
471void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
472                                    MachineBasicBlock::iterator InsertPos) {
473  // Advance RegionBegin if the first instruction moves down.
474  if (&*RegionBegin == MI)
475    ++RegionBegin;
476
477  // Update the instruction stream.
478  BB->splice(InsertPos, BB, MI);
479
480  // Update LiveIntervals
481  LIS->handleMove(MI);
482
483  // Recede RegionBegin if an instruction moves above the first.
484  if (RegionBegin == InsertPos)
485    RegionBegin = MI;
486}
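// For example (hypothetical region contents): with instructions <A, B, C> and
// RegionBegin == A, moveInstruction(A, C) first advances RegionBegin to B and
// then splices A in front of C. Conversely, moveInstruction(C, A) splices C in
// front of A and, because InsertPos was RegionBegin, recedes RegionBegin to C
// so the region still starts at its first instruction.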
487
488bool ScheduleDAGMI::checkSchedLimit() {
489#ifndef NDEBUG
490  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
491    CurrentTop = CurrentBottom;
492    return false;
493  }
494  ++NumInstrsScheduled;
495#endif
496  return true;
497}
498
499/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
500/// crossing a scheduling boundary. [begin, end) includes all instructions in
501/// the region, including the boundary itself and single-instruction regions
502/// that don't get scheduled.
503void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
504                                MachineBasicBlock::iterator begin,
505                                MachineBasicBlock::iterator end,
506                                unsigned endcount)
507{
508  ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
509
510  // For convenience remember the end of the liveness region.
511  LiveRegionEnd =
512    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
513}
514
515// Set up the register pressure trackers for the top and bottom scheduled
516// regions.
517void ScheduleDAGMI::initRegPressure() {
518  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
519  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
520
521  // Close the RPTracker to finalize live ins.
522  RPTracker.closeRegion();
523
524  DEBUG(RPTracker.getPressure().dump(TRI));
525
526  // Initialize the live ins and live outs.
527  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
528  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
529
530  // Close one end of the tracker so we can call
531  // getMaxUpward/DownwardPressureDelta before advancing across any
532  // instructions. This converts currently live regs into live ins/outs.
533  TopRPTracker.closeTop();
534  BotRPTracker.closeBottom();
535
536  // Account for liveness generated by the region boundary.
537  if (LiveRegionEnd != RegionEnd)
538    BotRPTracker.recede();
539
540  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");
541
542  // Cache the list of excess pressure sets in this region. This will also track
543  // the max pressure in the scheduled code for these sets.
544  RegionCriticalPSets.clear();
545  std::vector<unsigned> RegionPressure = RPTracker.getPressure().MaxSetPressure;
546  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
547    unsigned Limit = TRI->getRegPressureSetLimit(i);
548    if (RegionPressure[i] > Limit)
549      RegionCriticalPSets.push_back(PressureElement(i, 0));
550  }
551  DEBUG(dbgs() << "Excess PSets: ";
552        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
553          dbgs() << TRI->getRegPressureSetName(
554            RegionCriticalPSets[i].PSetID) << " ";
555        dbgs() << "\n");
556}
557
558// FIXME: When the pressure tracker deals in pressure differences, we won't
559// need to iterate over all RegionCriticalPSets[i].
560void ScheduleDAGMI::
561updateScheduledPressure(std::vector<unsigned> NewMaxPressure) {
562  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
563    unsigned ID = RegionCriticalPSets[i].PSetID;
564    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
565    if ((int)NewMaxPressure[ID] > MaxUnits)
566      MaxUnits = NewMaxPressure[ID];
567  }
568}
569
570// Release all DAG roots for scheduling.
571void ScheduleDAGMI::releaseRoots() {
572  SmallVector<SUnit*, 16> BotRoots;
573
574  for (std::vector<SUnit>::iterator
575         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
576    // A SUnit is ready to top schedule if it has no predecessors.
577    if (I->Preds.empty())
578      SchedImpl->releaseTopNode(&(*I));
579    // A SUnit is ready to bottom schedule if it has no successors.
580    if (I->Succs.empty())
581      BotRoots.push_back(&(*I));
582  }
583  // Release bottom roots in reverse order so the higher priority nodes appear
584  // first. This is more natural and slightly more efficient.
585  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
586         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I)
587    SchedImpl->releaseBottomNode(*I);
588}
589
590/// schedule - Called back from MachineScheduler::runOnMachineFunction
591/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
592/// only includes instructions that have DAG nodes, not scheduling boundaries.
593void ScheduleDAGMI::schedule() {
594  // Initialize the register pressure tracker used by buildSchedGraph.
595  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);
596
597  // Account for liveness generated by the region boundary.
598  if (LiveRegionEnd != RegionEnd)
599    RPTracker.recede();
600
601  // Build the DAG, and compute current register pressure.
602  buildSchedGraph(AA, &RPTracker);
603
604  // Initialize top/bottom trackers after computing region pressure.
605  initRegPressure();
606
607  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
608          SUnits[su].dumpAll(this));
609
610  if (ViewMISchedDAGs) viewGraph();
611
612  SchedImpl->initialize(this);
613
614  // Release edges from the special Entry node or to the special Exit node.
615  releaseSuccessors(&EntrySU);
616  releasePredecessors(&ExitSU);
617
618  // Release all DAG roots for scheduling.
619  releaseRoots();
620
621  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
622  CurrentBottom = RegionEnd;
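  // The unscheduled zone shrinks from both ends:
  //   [RegionBegin, CurrentTop)    already scheduled top-down
  //   [CurrentTop, CurrentBottom)  not yet scheduled
  //   [CurrentBottom, RegionEnd)   already scheduled bottom-up
  // Each iteration below moves one instruction into one of the scheduled zones
  // until CurrentTop meets CurrentBottom.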
623  bool IsTopNode = false;
624  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
625    if (!checkSchedLimit())
626      break;
627
628    // Move the instruction to its new location in the instruction stream.
629    MachineInstr *MI = SU->getInstr();
630
631    if (IsTopNode) {
632      assert(SU->isTopReady() && "node still has unscheduled dependencies");
633      if (&*CurrentTop == MI)
634        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
635      else {
636        moveInstruction(MI, CurrentTop);
637        TopRPTracker.setPos(MI);
638      }
639
640      // Update top scheduled pressure.
641      TopRPTracker.advance();
642      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
643      updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
644
645      // Release dependent instructions for scheduling.
646      releaseSuccessors(SU);
647    }
648    else {
649      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
650      MachineBasicBlock::iterator priorII =
651        priorNonDebug(CurrentBottom, CurrentTop);
652      if (&*priorII == MI)
653        CurrentBottom = priorII;
654      else {
655        if (&*CurrentTop == MI) {
656          CurrentTop = nextIfDebug(++CurrentTop, priorII);
657          TopRPTracker.setPos(CurrentTop);
658        }
659        moveInstruction(MI, CurrentBottom);
660        CurrentBottom = MI;
661      }
662      // Update bottom scheduled pressure.
663      BotRPTracker.recede();
664      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
665      updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
666
667      // Release dependent instructions for scheduling.
668      releasePredecessors(SU);
669    }
670    SU->isScheduled = true;
671    SchedImpl->schedNode(SU, IsTopNode);
672  }
673  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
674
675  placeDebugValues();
676}
677
678/// Reinsert any remaining debug_values, just like the PostRA scheduler.
679void ScheduleDAGMI::placeDebugValues() {
680  // If first instruction was a DBG_VALUE then put it back.
681  if (FirstDbgValue) {
682    BB->splice(RegionBegin, BB, FirstDbgValue);
683    RegionBegin = FirstDbgValue;
684  }
685
686  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
687         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
688    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
689    MachineInstr *DbgValue = P.first;
690    MachineBasicBlock::iterator OrigPrevMI = P.second;
691    BB->splice(++OrigPrevMI, BB, DbgValue);
692    if (OrigPrevMI == llvm::prior(RegionEnd))
693      RegionEnd = DbgValue;
694  }
695  DbgValues.clear();
696  FirstDbgValue = NULL;
697}
698
699//===----------------------------------------------------------------------===//
700// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
701//===----------------------------------------------------------------------===//
702
703namespace {
704/// ReadyQ encapsulates a vector of "ready" SUnits with basic convenience methods
705/// for pushing and removing nodes. ReadyQs are uniquely identified by an
706/// ID. SUnit::NodeQueueId is a mask of the ReadyQs that the SUnit is in.
707class ReadyQueue {
708  unsigned ID;
709  std::string Name;
710  std::vector<SUnit*> Queue;
711
712public:
713  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
714
715  unsigned getID() const { return ID; }
716
717  StringRef getName() const { return Name; }
718
719  // SU is in this queue if its NodeQueueId is a superset of this ID.
720  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
721
722  bool empty() const { return Queue.empty(); }
723
724  unsigned size() const { return Queue.size(); }
725
726  typedef std::vector<SUnit*>::iterator iterator;
727
728  iterator begin() { return Queue.begin(); }
729
730  iterator end() { return Queue.end(); }
731
732  iterator find(SUnit *SU) {
733    return std::find(Queue.begin(), Queue.end(), SU);
734  }
735
736  void push(SUnit *SU) {
737    Queue.push_back(SU);
738    SU->NodeQueueId |= ID;
739  }
740
741  void remove(iterator I) {
742    (*I)->NodeQueueId &= ~ID;
743    *I = Queue.back();
744    Queue.pop_back();
745  }
746
747  void dump() {
748    dbgs() << Name << ": ";
749    for (unsigned i = 0, e = Queue.size(); i < e; ++i)
750      dbgs() << Queue[i]->NodeNum << " ";
751    dbgs() << "\n";
752  }
753};
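// With the queue IDs defined by ConvergingScheduler below (TopQID == 1,
// BotQID == 2, and pending queues shifted left by LogMaxQID, i.e. 4 and 8),
// NodeQueueId behaves as a small bitmask: a node currently in both Available
// queues has NodeQueueId == 3, and remove() clears only this queue's bit.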
754
755/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
756/// the schedule.
757class ConvergingScheduler : public MachineSchedStrategy {
758
759  /// Store the state used by ConvergingScheduler heuristics, required for the
760  /// lifetime of one invocation of pickNode().
761  struct SchedCandidate {
762    // The best SUnit candidate.
763    SUnit *SU;
764
765    // Register pressure values for the best candidate.
766    RegPressureDelta RPDelta;
767
768    SchedCandidate(): SU(NULL) {}
769  };
770  /// Represent the type of SchedCandidate found within a single queue.
771  enum CandResult {
772    NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure };
773
774  /// Each scheduling boundary is associated with ready queues. It tracks the
775  /// current cycle in whichever direction it has moved, and maintains the state
776  /// of "hazards" and other interlocks at the current cycle.
777  struct SchedBoundary {
778    ReadyQueue Available;
779    ReadyQueue Pending;
780    bool CheckPending;
781
782    ScheduleHazardRecognizer *HazardRec;
783
784    unsigned CurrCycle;
785    unsigned IssueCount;
786
787    /// MinReadyCycle - Cycle of the soonest available instruction.
788    unsigned MinReadyCycle;
789
790    /// Pending queues extend the ready queues with the same ID and the
791    /// PendingFlag set.
792    SchedBoundary(unsigned ID, const Twine &Name):
793      Available(ID, Name+".A"),
794      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
795      CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0),
796      MinReadyCycle(UINT_MAX) {}
797
798    ~SchedBoundary() { delete HazardRec; }
799
800    bool isTop() const {
801      return Available.getID() == ConvergingScheduler::TopQID;
802    }
803
804    void releaseNode(SUnit *SU, unsigned ReadyCycle);
805
806    void bumpCycle();
807
808    void releasePending();
809
810    void removeReady(SUnit *SU);
811
812    SUnit *pickOnlyChoice();
813  };
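  // A node that cannot issue yet (the hazard recognizer reports a hazard when
  // it is released) waits in Pending; releasePending() moves it to Available
  // once its ready cycle has been reached and the hazard has cleared, after
  // bumpCycle() advances CurrCycle.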
814
815  ScheduleDAGMI *DAG;
816  const TargetRegisterInfo *TRI;
817
818  // State of the top and bottom scheduled instruction boundaries.
819  SchedBoundary Top;
820  SchedBoundary Bot;
821
822public:
823  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
824  enum {
825    TopQID = 1,
826    BotQID = 2,
827    LogMaxQID = 2
828  };
829
830  ConvergingScheduler():
831    DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
832
833  virtual void initialize(ScheduleDAGMI *dag);
834
835  virtual SUnit *pickNode(bool &IsTopNode);
836
837  virtual void schedNode(SUnit *SU, bool IsTopNode);
838
839  virtual void releaseTopNode(SUnit *SU);
840
841  virtual void releaseBottomNode(SUnit *SU);
842
843protected:
844  SUnit *pickNodeBidrectional(bool &IsTopNode);
845
846  CandResult pickNodeFromQueue(ReadyQueue &Q,
847                               const RegPressureTracker &RPTracker,
848                               SchedCandidate &Candidate);
849#ifndef NDEBUG
850  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
851                      PressureElement P = PressureElement());
852#endif
853};
854} // namespace
855
856void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
857  DAG = dag;
858  TRI = DAG->TRI;
859
860  // Initialize the HazardRecognizers.
861  const TargetMachine &TM = DAG->MF.getTarget();
862  const InstrItineraryData *Itin = TM.getInstrItineraryData();
863  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
864  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
865
866  assert((!ForceTopDown || !ForceBottomUp) &&
867         "-misched-topdown incompatible with -misched-bottomup");
868}
869
870void ConvergingScheduler::releaseTopNode(SUnit *SU) {
871  Top.releaseNode(SU, SU->getDepth());
872}
873
874void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
875  Bot.releaseNode(SU, SU->getHeight());
876}
877
878void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
879                                                     unsigned ReadyCycle) {
880  if (SU->isScheduled)
881    return;
882
883  if (ReadyCycle < MinReadyCycle)
884    MinReadyCycle = ReadyCycle;
885
886  // Check for interlocks first. For the purpose of other heuristics, an
887  // instruction that cannot issue appears as if it's not in the ReadyQueue.
888  if (HazardRec->isEnabled()
889      && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard)
890    Pending.push(SU);
891  else
892    Available.push(SU);
893}
894
895/// Move the boundary of scheduled code by one cycle.
896void ConvergingScheduler::SchedBoundary::bumpCycle() {
897  IssueCount = 0;
898
899  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
900  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
901
902  if (!HazardRec->isEnabled()) {
903    // Bypass lots of virtual calls in case of long latency.
904    CurrCycle = NextCycle;
905  }
906  else {
907    for (; CurrCycle != NextCycle; ++CurrCycle) {
908      if (isTop())
909        HazardRec->AdvanceCycle();
910      else
911        HazardRec->RecedeCycle();
912    }
913  }
914  CheckPending = true;
915
916  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
917        << CurrCycle << '\n');
918}
919
920/// Release pending ready nodes into the available queue. This makes them
921/// visible to heuristics.
922void ConvergingScheduler::SchedBoundary::releasePending() {
923  // If the available queue is empty, it is safe to reset MinReadyCycle.
924  if (Available.empty())
925    MinReadyCycle = UINT_MAX;
926
927  // Check to see if any of the pending instructions are ready to issue.  If
928  // so, add them to the available queue.
929  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
930    SUnit *SU = *(Pending.begin()+i);
931    unsigned ReadyCycle = isTop() ? SU->getHeight() : SU->getDepth();
932
933    if (ReadyCycle < MinReadyCycle)
934      MinReadyCycle = ReadyCycle;
935
936    if (ReadyCycle > CurrCycle)
937      continue;
938
939    if (HazardRec->isEnabled()
940        && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard)
941      continue;
942
943    Available.push(SU);
944    Pending.remove(Pending.begin()+i);
945    --i; --e;
946  }
947  CheckPending = false;
948}
949
950/// Remove SU from the ready set for this boundary.
951void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
952  if (Available.isInQueue(SU))
953    Available.remove(Available.find(SU));
954  else {
955    assert(Pending.isInQueue(SU) && "bad ready count");
956    Pending.remove(Pending.find(SU));
957  }
958}
959
960/// If this queue only has one ready candidate, return it. As a side effect,
961/// advance the cycle until at least one node is ready. If multiple instructions
962/// are ready, return NULL.
963SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
964  if (CheckPending)
965    releasePending();
966
967  for (unsigned i = 0; Available.empty(); ++i) {
968    assert(i <= HazardRec->getMaxLookAhead() && "permanent hazard"); (void)i;
969    bumpCycle();
970    releasePending();
971  }
972  if (Available.size() == 1)
973    return *Available.begin();
974  return NULL;
975}
976
977#ifndef NDEBUG
978void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q,
979                                         SUnit *SU, PressureElement P) {
980  dbgs() << Label << " " << Q.getName() << " ";
981  if (P.isValid())
982    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
983           << " ";
984  else
985    dbgs() << "     ";
986  SU->dump(DAG);
987}
988#endif
989
990/// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is
991/// more desirable than RHS from a scheduling standpoint.
992static bool compareRPDelta(const RegPressureDelta &LHS,
993                           const RegPressureDelta &RHS) {
994  // Compare each component of pressure in decreasing order of importance
995  // without checking if any are valid. Invalid PressureElements are assumed to
996  // have UnitIncrease==0, so are neutral.
997
998  // Avoid exceeding the target's pressure set limit.
999  if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease)
1000    return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease;
1001
1002  // Avoid increasing the max critical pressure in the scheduled region.
1003  if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease)
1004    return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease;
1005
1006  // Avoid increasing the max pressure of the entire region.
1007  if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease)
1008    return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease;
1009
1010  return false;
1011}
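// For example (hypothetical deltas): if LHS leaves Excess unchanged while RHS
// increases it by 2 units, LHS wins immediately and the CriticalMax and
// CurrentMax components are never consulted; the comparison is purely
// lexicographic.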
1012
1013/// Pick the best candidate from the top queue.
1014///
1015/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
1016/// DAG building. To adjust for the current scheduling location we need to
1017/// maintain the number of vreg uses remaining to be top-scheduled.
1018ConvergingScheduler::CandResult ConvergingScheduler::
1019pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
1020                  SchedCandidate &Candidate) {
1021  DEBUG(Q.dump());
1022
1023  // getMaxPressureDelta temporarily modifies the tracker.
1024  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
1025
1026  // Candidate.SU remains NULL if no candidate beats the best existing candidate.
1027  CandResult FoundCandidate = NoCand;
1028  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
1029    RegPressureDelta RPDelta;
1030    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
1031                                    DAG->getRegionCriticalPSets(),
1032                                    DAG->getRegPressure().MaxSetPressure);
1033
1034    // Initialize the candidate if needed.
1035    if (!Candidate.SU) {
1036      Candidate.SU = *I;
1037      Candidate.RPDelta = RPDelta;
1038      FoundCandidate = NodeOrder;
1039      continue;
1040    }
1041    // Avoid exceeding the target's limit.
1042    if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) {
1043      DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess));
1044      Candidate.SU = *I;
1045      Candidate.RPDelta = RPDelta;
1046      FoundCandidate = SingleExcess;
1047      continue;
1048    }
1049    if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease)
1050      continue;
1051    if (FoundCandidate == SingleExcess)
1052      FoundCandidate = MultiPressure;
1053
1054    // Avoid increasing the max critical pressure in the scheduled region.
1055    if (RPDelta.CriticalMax.UnitIncrease
1056        < Candidate.RPDelta.CriticalMax.UnitIncrease) {
1057      DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax));
1058      Candidate.SU = *I;
1059      Candidate.RPDelta = RPDelta;
1060      FoundCandidate = SingleCritical;
1061      continue;
1062    }
1063    if (RPDelta.CriticalMax.UnitIncrease
1064        > Candidate.RPDelta.CriticalMax.UnitIncrease)
1065      continue;
1066    if (FoundCandidate == SingleCritical)
1067      FoundCandidate = MultiPressure;
1068
1069    // Avoid increasing the max pressure of the entire region.
1070    if (RPDelta.CurrentMax.UnitIncrease
1071        < Candidate.RPDelta.CurrentMax.UnitIncrease) {
1072      DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax));
1073      Candidate.SU = *I;
1074      Candidate.RPDelta = RPDelta;
1075      FoundCandidate = SingleMax;
1076      continue;
1077    }
1078    if (RPDelta.CurrentMax.UnitIncrease
1079        > Candidate.RPDelta.CurrentMax.UnitIncrease)
1080      continue;
1081    if (FoundCandidate == SingleMax)
1082      FoundCandidate = MultiPressure;
1083
1084    // Fall through to original instruction order.
1085    // Only consider node order if Candidate was chosen from this Q.
1086    if (FoundCandidate == NoCand)
1087      continue;
1088
1089    if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
1090        || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
1091      DEBUG(traceCandidate("NCAND", Q, *I));
1092      Candidate.SU = *I;
1093      Candidate.RPDelta = RPDelta;
1094      FoundCandidate = NodeOrder;
1095    }
1096  }
1097  return FoundCandidate;
1098}
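// In summary, candidates are compared lexicographically: smallest Excess
// increase, then smallest CriticalMax increase, then smallest CurrentMax
// increase, with original node order as the final tie breaker. A Single*
// result records which criterion was decisive; MultiPressure means a later
// candidate tied the previously decisive criterion, so the winner is no
// longer unique on that basis.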
1099
1100/// Pick the best candidate node from either the top or bottom queue.
1101SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) {
1102  // Schedule as far as possible in the direction of no choice. This is most
1103  // efficient, but also provides the best heuristics for CriticalPSets.
1104  if (SUnit *SU = Bot.pickOnlyChoice()) {
1105    IsTopNode = false;
1106    return SU;
1107  }
1108  if (SUnit *SU = Top.pickOnlyChoice()) {
1109    IsTopNode = true;
1110    return SU;
1111  }
1112  SchedCandidate BotCand;
1113  // Prefer bottom scheduling when heuristics are silent.
1114  CandResult BotResult = pickNodeFromQueue(Bot.Available,
1115                                           DAG->getBotRPTracker(), BotCand);
1116  assert(BotResult != NoCand && "failed to find the first candidate");
1117
1118  // If either Q has a single candidate that provides the least increase in
1119  // Excess pressure, we can immediately schedule from that Q.
1120  //
1121  // RegionCriticalPSets summarizes the pressure within the scheduled region and
1122  // affects picking from either Q. If scheduling in one direction must
1123  // increase pressure for one of the excess PSets, then schedule in that
1124  // direction first to provide more freedom in the other direction.
1125  if (BotResult == SingleExcess || BotResult == SingleCritical) {
1126    IsTopNode = false;
1127    return BotCand.SU;
1128  }
1129  // Check if the top Q has a better candidate.
1130  SchedCandidate TopCand;
1131  CandResult TopResult = pickNodeFromQueue(Top.Available,
1132                                           DAG->getTopRPTracker(), TopCand);
1133  assert(TopResult != NoCand && "failed to find the first candidate");
1134
1135  if (TopResult == SingleExcess || TopResult == SingleCritical) {
1136    IsTopNode = true;
1137    return TopCand.SU;
1138  }
1139  // If either Q has a single candidate that minimizes pressure above the
1140  // original region's pressure, pick it.
1141  if (BotResult == SingleMax) {
1142    IsTopNode = false;
1143    return BotCand.SU;
1144  }
1145  if (TopResult == SingleMax) {
1146    IsTopNode = true;
1147    return TopCand.SU;
1148  }
1149  // Check for a salient pressure difference and pick the best from either side.
1150  if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) {
1151    IsTopNode = true;
1152    return TopCand.SU;
1153  }
1154  // Otherwise prefer the bottom candidate in node order.
1155  IsTopNode = false;
1156  return BotCand.SU;
1157}
1158
1159/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
1160SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
1161  if (DAG->top() == DAG->bottom()) {
1162    assert(Top.Available.empty() && Top.Pending.empty() &&
1163           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
1164    return NULL;
1165  }
1166  SUnit *SU;
1167  if (ForceTopDown) {
1168    SU = Top.pickOnlyChoice();
1169    if (!SU) {
1170      SchedCandidate TopCand;
1171      CandResult TopResult =
1172        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
1173      assert(TopResult != NoCand && "failed to find the first candidate");
1174      (void)TopResult;
1175      SU = TopCand.SU;
1176    }
1177    IsTopNode = true;
1178  }
1179  else if (ForceBottomUp) {
1180    SU = Bot.pickOnlyChoice();
1181    if (!SU) {
1182      SchedCandidate BotCand;
1183      CandResult BotResult =
1184        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
1185      assert(BotResult != NoCand && "failed to find the first candidate");
1186      (void)BotResult;
1187      SU = BotCand.SU;
1188    }
1189    IsTopNode = false;
1190  }
1191  else {
1192    SU = pickNodeBidrectional(IsTopNode);
1193  }
1194  if (SU->isTopReady())
1195    Top.removeReady(SU);
1196  if (SU->isBottomReady())
1197    Bot.removeReady(SU);
1198
1199  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
1200        << " Scheduling Instruction in cycle "
1201        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
1202        SU->dump(DAG));
1203  return SU;
1204}
1205
1206/// Update the scheduler's state after scheduling a node. This is the same node
1207/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
1208/// its state based on the current cycle before MachineSchedStrategy does.
1209void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
1210  // Update the reservation table.
1211  if (IsTopNode && Top.HazardRec->isEnabled()) {
1212    Top.HazardRec->EmitInstruction(SU);
1213    if (Top.HazardRec->atIssueLimit()) {
1214      DEBUG(dbgs() << "*** Max instrs at cycle " << Top.CurrCycle << '\n');
1215      Top.bumpCycle();
1216    }
1217  }
1218  else if (Bot.HazardRec->isEnabled()) {
1219    if (SU->isCall) {
1220      // Calls are scheduled with their preceding instructions. For bottom-up
1221      // scheduling, clear the pipeline state before emitting.
1222      Bot.HazardRec->Reset();
1223    }
1224    Bot.HazardRec->EmitInstruction(SU);
1225    if (Bot.HazardRec->atIssueLimit()) {
1226      DEBUG(dbgs() << "*** Max instrs at cycle " << Bot.CurrCycle << '\n');
1227      Bot.bumpCycle();
1228    }
1229  }
1230}
1231
1232/// Create the standard converging machine scheduler. This will be used as the
1233/// default scheduler if the target does not set a default.
1234static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
1235  assert((!ForceTopDown || !ForceBottomUp) &&
1236         "-misched-topdown incompatible with -misched-bottomup");
1237  return new ScheduleDAGMI(C, new ConvergingScheduler());
1238}
1239static MachineSchedRegistry
1240ConvergingSchedRegistry("converge", "Standard converging scheduler.",
1241                        createConvergingSched);
1242
1243//===----------------------------------------------------------------------===//
1244// Machine Instruction Shuffler for Correctness Testing
1245//===----------------------------------------------------------------------===//
1246
1247#ifndef NDEBUG
1248namespace {
1249/// Apply a less-than relation on the node order, which corresponds to the
1250/// instruction order prior to scheduling. IsReverse implements greater-than.
1251template<bool IsReverse>
1252struct SUnitOrder {
1253  bool operator()(SUnit *A, SUnit *B) const {
1254    if (IsReverse)
1255      return A->NodeNum > B->NodeNum;
1256    else
1257      return A->NodeNum < B->NodeNum;
1258  }
1259};
1260
1261/// Reorder instructions as much as possible.
1262class InstructionShuffler : public MachineSchedStrategy {
1263  bool IsAlternating;
1264  bool IsTopDown;
1265
1266  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
1267  // gives nodes with a higher number higher priority, causing the latest
1268  // instructions to be scheduled first.
1269  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
1270    TopQ;
1271  // When scheduling bottom-up, use greater-than as the queue priority.
1272  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
1273    BottomQ;
1274public:
1275  InstructionShuffler(bool alternate, bool topdown)
1276    : IsAlternating(alternate), IsTopDown(topdown) {}
1277
1278  virtual void initialize(ScheduleDAGMI *) {
1279    TopQ.clear();
1280    BottomQ.clear();
1281  }
1282
1283  /// Implement MachineSchedStrategy interface.
1284  /// -----------------------------------------
1285
1286  virtual SUnit *pickNode(bool &IsTopNode) {
1287    SUnit *SU;
1288    if (IsTopDown) {
1289      do {
1290        if (TopQ.empty()) return NULL;
1291        SU = TopQ.top();
1292        TopQ.pop();
1293      } while (SU->isScheduled);
1294      IsTopNode = true;
1295    }
1296    else {
1297      do {
1298        if (BottomQ.empty()) return NULL;
1299        SU = BottomQ.top();
1300        BottomQ.pop();
1301      } while (SU->isScheduled);
1302      IsTopNode = false;
1303    }
1304    if (IsAlternating)
1305      IsTopDown = !IsTopDown;
1306    return SU;
1307  }
1308
1309  virtual void schedNode(SUnit *SU, bool IsTopNode) {}
1310
1311  virtual void releaseTopNode(SUnit *SU) {
1312    TopQ.push(SU);
1313  }
1314  virtual void releaseBottomNode(SUnit *SU) {
1315    BottomQ.push(SU);
1316  }
1317};
1318} // namespace
1319
1320static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
1321  bool Alternate = !ForceTopDown && !ForceBottomUp;
1322  bool TopDown = !ForceBottomUp;
1323  assert((TopDown || !ForceTopDown) &&
1324         "-misched-topdown incompatible with -misched-bottomup");
1325  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
1326}
1327static MachineSchedRegistry ShufflerRegistry(
1328  "shuffle", "Shuffle machine instructions alternating directions",
1329  createInstructionShuffler);
1330#endif // !NDEBUG
1331