MachineScheduler.cpp revision 6dc6a89d73c24f20caabda4cdcd9279e88658d0b
//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include <queue>

using namespace llvm;

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(false));

static cl::opt<bool> EnableLoadCluster("misched-cluster", cl::Hidden,
  cl::desc("Enable load clustering."), cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));
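
// A rough usage sketch for the flags above (assuming an asserts-enabled llc,
// since several of these options are compiled out under NDEBUG):
//
//   llc -enable-misched -misched-cutoff=42 -verify-misched \
//       -debug-only=misched foo.ll
//
// -enable-misched is the TargetPassConfig toggle; -misched-cutoff helps
// bisect a misched problem by halting after N scheduling decisions.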

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedContext,
                         public MachineFunctionPass {
public:
  MachineScheduler();

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;

  virtual void releaseMemory() {}

  virtual bool runOnMachineFunction(MachineFunction&);

  virtual void print(raw_ostream &O, const Module* = 0) const;

  static char ID; // Class identification, replacement for typeinfo
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "misched",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineFunctionPass(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return 0;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
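
// A target or a plugin can register additional schedulers with the registry
// above. A minimal sketch (createCustomSched and CustomStrategy are
// hypothetical names):
//
//   static ScheduleDAGInstrs *createCustomSched(MachineSchedContext *C) {
//     return new ScheduleDAGMI(C, new CustomStrategy());
//   }
//   static MachineSchedRegistry
//   CustomSchedRegistry("custom", "Run the custom strategy.",
//                       createCustomSched);
//
// The new scheduler then becomes selectable with -misched=custom.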

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);


/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I, MachineBasicBlock::iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
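///
/// As an illustration (a sketch, not tied to any particular target), a block
/// such as:
///
///   %a = ...
///   %b = ...
///   CALL @f        ; scheduling boundary
///   %c = ...
///   RET            ; terminator, also a boundary
///
/// is visited as two regions, bottom-up: first [%c, RET), then [%a, CALL).
/// The boundary instructions themselves are never reordered.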
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AliasAnalysis>();

  LIS = &getAnalysis<LiveIntervals>();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor == useDefaultMachineSched) {
    // Get the default scheduler set by the target.
    Ctor = MachineSchedRegistry::getDefault();
    if (!Ctor) {
      Ctor = createConvergingSched;
      MachineSchedRegistry::setDefault(Ctor);
    }
  }
  // Instantiate the selected scheduler.
  OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler->startBlock(MBB);

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    unsigned RemainingInstrs = MBB->size();
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end()
          || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
        --RegionEnd;
        // Count the boundary instruction.
        --RemainingInstrs;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I, --RemainingInstrs, ++NumRegionInstrs) {
        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
          break;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler->enterRegion(MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler->exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs
            << " Remaining: " << RemainingInstrs << "\n");

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler->schedule();

      // Close the current region.
      Scheduler->exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler->begin();
    }
    assert(RemainingInstrs == 0 && "Instruction count mismatch!");
    Scheduler->finishBlock();
  }
  Scheduler->finalizeSchedule();
  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

void MachineScheduler::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ReadyQueue::dump() {
  dbgs() << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMI::~ScheduleDAGMI() {
  delete DFSResult;
  DeleteContainerPointers(Mutations);
  delete SchedImpl;
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
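
// A typical use of the two methods above from a DAG mutation (cf.
// CopyConstrain below) is to guard a speculative ordering edge, as a sketch:
//
//   if (DAG->canAddEdge(SuccSU, PredSU))
//     DAG->addEdge(SuccSU, SDep(PredSU, SDep::Artificial));
//
// canAddEdge rejects edges that would create a cycle in the topological order
// maintained by Topo.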

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
                                    MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  LIS->handleMove(MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd =
    (RegionEnd == bb->end()) ? RegionEnd : llvm::next(RegionEnd);
}

// Set up the register pressure trackers for the top and bottom scheduled
// regions.
void ScheduleDAGMI::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    BotRPTracker.recede();

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureElement(i, 0));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].PSetID) << " ";
        dbgs() << "\n");
}

// FIXME: When the pressure tracker deals in pressure differences then we won't
// iterate over all RegionCriticalPSets[i].
void ScheduleDAGMI::
updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure) {
  for (unsigned i = 0, e = RegionCriticalPSets.size(); i < e; ++i) {
    unsigned ID = RegionCriticalPSets[i].PSetID;
    int &MaxUnits = RegionCriticalPSets[i].UnitIncrease;
    if ((int)NewMaxPressure[ID] > MaxUnits)
      MaxUnits = NewMaxPressure[ID];
  }
  DEBUG(
    for (unsigned i = 0, e = NewMaxPressure.size(); i < e; ++i) {
      unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
      if (NewMaxPressure[i] > Limit) {
        dbgs() << "  " << TRI->getRegPressureSetName(i) << ": "
               << NewMaxPressure[i] << " > " << Limit << "\n";
      }
    });
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMI then it will want to override this virtual method in order to
/// update any specialized state.
void ScheduleDAGMI::schedule() {
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMI::buildDAGWithRegPressure() {
  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}
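
// Mutations are typically attached when the ScheduleDAGMI is constructed.
// As a sketch (the exact mutation set is a per-target policy decision):
//
//   ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
//   if (EnableLoadCluster)
//     DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
//   if (EnableMacroFusion)
//     DAG->addMutation(new MacroFusion(DAG->TII));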

void ScheduleDAGMI::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

void ScheduleDAGMI::findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                                          SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
unsigned ScheduleDAGMI::computeCyclicCriticalPath() {
  // This only applies to single-block loops.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  ArrayRef<unsigned> LiveOuts = RPTracker.getPressure().LiveOutRegs;
  for (ArrayRef<unsigned>::iterator RI = LiveOuts.begin(), RE = LiveOuts.end();
       RI != RE; ++RI) {
    unsigned Reg = *RI;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (VReg2UseMap::iterator
           UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
      if (UI->SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(UI->SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > UI->SU->getDepth())
        CyclicLatency = LiveOutDepth - UI->SU->getDepth();

      unsigned LiveInHeight = UI->SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      }
      else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << UI->SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = NULL;
  NextClusterPred = NULL;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  TopRPTracker.setPos(CurrentTop);

  CurrentBottom = RegionEnd;
}

/// Move an instruction and update register pressure.
void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    // Update top scheduled pressure.
    TopRPTracker.advance();
    assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
    updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure);
  }
  else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
    }
    // Update bottom scheduled pressure.
    BotRPTracker.recede();
    assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
    updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure);
  }
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;

  if (DFSResult) {
    unsigned SubtreeID = DFSResult->getSubtreeID(SU);
    if (!ScheduledTrees.test(SubtreeID)) {
      ScheduledTrees.set(SubtreeID);
      DFSResult->scheduleTree(SubtreeID);
      SchedImpl->scheduleTree(SubtreeID);
    }
  }

  // Notify the scheduling strategy after updating the DAG.
  SchedImpl->schedNode(SU, IsTopNode);
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == llvm::prior(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// LoadClusterMutation - DAG post-processing to cluster loads.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads.
class LoadClusterMutation : public ScheduleDAGMutation {
  struct LoadInfo {
    SUnit *SU;
    unsigned BaseReg;
    unsigned Offset;
    LoadInfo(SUnit *su, unsigned reg, unsigned ofs)
      : SU(su), BaseReg(reg), Offset(ofs) {}
  };
  static bool LoadInfoLess(const LoadClusterMutation::LoadInfo &LHS,
                           const LoadClusterMutation::LoadInfo &RHS);

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
public:
  LoadClusterMutation(const TargetInstrInfo *tii,
                      const TargetRegisterInfo *tri)
    : TII(tii), TRI(tri) {}

  virtual void apply(ScheduleDAGMI *DAG);
protected:
  void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
} // anonymous

bool LoadClusterMutation::LoadInfoLess(
  const LoadClusterMutation::LoadInfo &LHS,
  const LoadClusterMutation::LoadInfo &RHS) {
  if (LHS.BaseReg != RHS.BaseReg)
    return LHS.BaseReg < RHS.BaseReg;
  return LHS.Offset < RHS.Offset;
}

void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
                                                  ScheduleDAGMI *DAG) {
  SmallVector<LoadClusterMutation::LoadInfo,32> LoadRecords;
  for (unsigned Idx = 0, End = Loads.size(); Idx != End; ++Idx) {
    SUnit *SU = Loads[Idx];
    unsigned BaseReg;
    unsigned Offset;
    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
      LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
  }
  if (LoadRecords.size() < 2)
    return;
  std::sort(LoadRecords.begin(), LoadRecords.end(), LoadInfoLess);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = LoadRecords.size(); Idx < (End - 1); ++Idx) {
    if (LoadRecords[Idx].BaseReg != LoadRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = LoadRecords[Idx].SU;
    SUnit *SUb = LoadRecords[Idx+1].SU;
    if (TII->shouldClusterLoads(SUa->getInstr(), SUb->getInstr(), ClusterLength)
        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {

      DEBUG(dbgs() << "Cluster loads SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (SUnit::const_succ_iterator
             SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
        if (SI->getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
        DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    }
    else
      ClusterLength = 1;
  }
}
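
// Illustrative outcome (a sketch): for loads L1, L2, L3 off the same base
// register, sorted by offset, the mutation adds cluster edges L1->L2->L3 so a
// bottom-up scheduler pulls the group together, e.g. on an ARM-like target:
//
//   LDRi12 %vreg1, %base, 0
//   LDRi12 %vreg2, %base, 4
//   LDRi12 %vreg3, %base, 8
//
// which keeps the loads adjacent for possible load-multiple formation.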

/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void LoadClusterMutation::apply(ScheduleDAGMI *DAG) {
  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent loads.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->mayLoad())
      continue;
    unsigned ChainPredID = DAG->SUnits.size();
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->isCtrl()) {
        ChainPredID = PI->getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID for loads at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(SU);
  }
  // Iterate over the store chains.
  for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
    clusterNeighboringLoads(StoreChainDependents[Idx], DAG);
}

//===----------------------------------------------------------------------===//
// MacroFusion - DAG post-processing to encourage fusion of macro ops.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create cluster edges between instructions
/// that may be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  const TargetInstrInfo *TII;
public:
  MacroFusion(const TargetInstrInfo *tii): TII(tii) {}

  virtual void apply(ScheduleDAGMI *DAG);
};
} // anonymous

/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
void MacroFusion::apply(ScheduleDAGMI *DAG) {
  // For now, assume targets can only fuse with the branch.
  MachineInstr *Branch = DAG->ExitSU.getInstr();
  if (!Branch)
    return;

  for (unsigned Idx = DAG->SUnits.size(); Idx > 0;) {
    SUnit *SU = &DAG->SUnits[--Idx];
    if (!TII->shouldScheduleAdjacent(SU->getInstr(), Branch))
      continue;

    // Create a single weak edge from SU to ExitSU. The only effect is to cause
    // bottom-up scheduling to heavily prioritize the clustered SU.  There is no
    // need to copy predecessor edges from ExitSU to SU, since top-down
    // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
    // of SU, we could create an artificial edge from the deepest root, but it
    // hasn't been needed yet.
    bool Success = DAG->addEdge(&DAG->ExitSU, SDep(SU, SDep::Cluster));
    (void)Success;
    assert(Success && "No DAG nodes should be reachable from ExitSU");

    DEBUG(dbgs() << "Macro Fuse SU(" << SU->NodeNum << ")\n");
    break;
  }
}
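
// On a target such as x86, shouldScheduleAdjacent typically returns true for
// a flag-setting compare feeding the branch, so that a pair like:
//
//   CMP32rr %vreg0, %vreg1, %EFLAGS<imp-def>
//   JNE_4 <BB#1>
//
// stays adjacent and can be macro-fused by the processor. (Illustrative only;
// the exact opcodes and fusion policy live in the target's TargetInstrInfo.)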

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;
public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  virtual void apply(ScheduleDAGMI *DAG);

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG);
};
} // anonymous

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMI *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  unsigned SrcReg = Copy->getOperand(1).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
    return;

  unsigned DstReg = Copy->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg))
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
1078  // edge, we cannot successfully contrain the copy without cyclic scheduling.
  unsigned LocalReg = DstReg;
  unsigned GlobalReg = SrcReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = SrcReg;
    GlobalReg = DstReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);

  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
1102  // LocalLI->start, then advance to the next segement. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then can't make a hole here.
    if (SlotIndex::isSameInstr(llvm::prior(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(llvm::prior(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (SUnit::const_succ_iterator
         I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
      continue;
    if (I->getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
      return;
    LocalUses.push_back(I->getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (SUnit::const_pred_iterator
         I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
    if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
      continue;
    if (I->getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
      return;
    GlobalUses.push_back(I->getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}

/// \brief Callback from DAG postProcessing to create weak edges to encourage
/// copy elimination.
void CopyConstrain::apply(ScheduleDAGMI *DAG) {
  MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
  if (FirstPos == DAG->end())
    return;
  RegionBeginIdx = DAG->getLIS()->getInstructionIndex(&*FirstPos);
  RegionEndIdx = DAG->getLIS()->getInstructionIndex(
    &*priorNonDebug(DAG->end(), DAG->begin()));

  for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
    SUnit *SU = &DAG->SUnits[Idx];
    if (!SU->getInstr()->isCopy())
      continue;

    constrainLocalCopy(SU, DAG);
  }
}

//===----------------------------------------------------------------------===//
// ConvergingScheduler - Implementation of the generic MachineSchedStrategy.
//===----------------------------------------------------------------------===//

namespace {
/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class ConvergingScheduler : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason {
    NoCand, PhysRegCopy, RegExcess, RegCritical, Cluster, Weak, RegMax,
    ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(ConvergingScheduler::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency;
    unsigned ReduceResIdx;
    unsigned DemandResIdx;

    CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources;

    SchedResourceDelta(): CritResources(0), DemandedResources(0) {}

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by ConvergingScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Set of reasons that apply to multiple candidates.
    uint32_t RepeatReasonSet;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate(const CandPolicy &policy)
      : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
    void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

  /// Summarize the unscheduled region.
  struct SchedRemainder {
    // Critical path through the DAG in expected latency.
    unsigned CriticalPath;
    unsigned CyclicCritPath;

    // Scaled count of micro-ops left to schedule.
    unsigned RemIssueCount;

    bool IsAcyclicLatencyLimited;

    // Unscheduled resources
    SmallVector<unsigned, 16> RemainingCounts;

    void reset() {
      CriticalPath = 0;
      CyclicCritPath = 0;
      RemIssueCount = 0;
      IsAcyclicLatencyLimited = false;
      RemainingCounts.clear();
    }

    SchedRemainder() { reset(); }

    void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
  };

  /// Each Scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in the direction of movement, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct SchedBoundary {
    ScheduleDAGMI *DAG;
    const TargetSchedModel *SchedModel;
    SchedRemainder *Rem;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending;

    // For heuristics, keep a list of the nodes that immediately depend on the
    // most recently scheduled node.
    SmallPtrSet<const SUnit*, 8> NextSUs;

    ScheduleHazardRecognizer *HazardRec;

    /// Number of cycles it takes to issue the instructions scheduled in this
    /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
    /// See getStalls().
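    ///
    /// As a rough worked example (a sketch, not tied to a real machine
    /// model): 7 scheduled micro-ops at issue width 2 need ceil(7/2) = 4
    /// issue cycles, and any stall cycles inserted by bumpCycle() are added
    /// on top of that.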
1341    unsigned CurrCycle;
1342
1343    /// Micro-ops issued in the current cycle
1344    unsigned CurrMOps;
1345
1346    /// MinReadyCycle - Cycle of the soonest available instruction.
1347    unsigned MinReadyCycle;
1348
1349    // The expected latency of the critical path in this scheduled zone.
1350    unsigned ExpectedLatency;
1351
1352    // The latency of dependence chains leading into this zone.
1353    // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
1354    // For each cycle scheduled: DLat -= 1.
1355    unsigned DependentLatency;
1356
1357    /// Count the scheduled (issued) micro-ops that can be retired by
1358    /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
1359    unsigned RetiredMOps;
1360
1361    // Count scheduled resources that have been executed. Resources are
1362    // considered executed if they become ready in the time that it takes to
1363    // saturate any resource including the one in question. Counts are scaled
1364    // for direct comparison with other resources. Counts can be compared with
1365    // MOps * getMicroOpFactor and Latency * getLatencyFactor.
1366    SmallVector<unsigned, 16> ExecutedResCounts;
1367
1368    /// Cache the max count for a single resource.
1369    unsigned MaxExecutedResCount;
1370
1371    // Cache the critical resources ID in this scheduled zone.
1372    unsigned ZoneCritResIdx;
1373
1374    // Is the scheduled region resource limited vs. latency limited.
1375    bool IsResourceLimited;
1376
1377#ifndef NDEBUG
1378    // Remember the greatest operand latency as an upper bound on the number of
1379    // times we should retry the pending queue because of a hazard.
1380    unsigned MaxObservedLatency;
1381#endif
1382
1383    void reset() {
1384      // A new HazardRec is created for each DAG and owned by SchedBoundary.
1385      delete HazardRec;
1386
1387      Available.clear();
1388      Pending.clear();
1389      CheckPending = false;
1390      NextSUs.clear();
1391      HazardRec = 0;
1392      CurrCycle = 0;
1393      CurrMOps = 0;
1394      MinReadyCycle = UINT_MAX;
1395      ExpectedLatency = 0;
1396      DependentLatency = 0;
1397      RetiredMOps = 0;
1398      MaxExecutedResCount = 0;
1399      ZoneCritResIdx = 0;
1400      IsResourceLimited = false;
1401#ifndef NDEBUG
1402      MaxObservedLatency = 0;
1403#endif
1404      // Reserve a zero-count for invalid CritResIdx.
1405      ExecutedResCounts.resize(1);
1406      assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1407    }
1408
1409    /// Pending queues extend the ready queues with the same ID and the
1410    /// PendingFlag set.
1411    SchedBoundary(unsigned ID, const Twine &Name):
1412      DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
1413      Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"),
1414      HazardRec(0) {
1415      reset();
1416    }
1417
1418    ~SchedBoundary() { delete HazardRec; }
1419
1420    void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
1421              SchedRemainder *rem);
1422
1423    bool isTop() const {
1424      return Available.getID() == ConvergingScheduler::TopQID;
1425    }
1426
1427#ifndef NDEBUG
1428    const char *getResourceName(unsigned PIdx) {
1429      if (!PIdx)
1430        return "MOps";
1431      return SchedModel->getProcResource(PIdx)->Name;
1432    }
1433#endif
1434
1435    /// Get the number of latency cycles "covered" by the scheduled
1436    /// instructions. This is the larger of the critical path within the zone
1437    /// and the number of cycles required to issue the instructions.
1438    unsigned getScheduledLatency() const {
1439      return std::max(ExpectedLatency, CurrCycle);
1440    }
1441
1442    unsigned getUnscheduledLatency(SUnit *SU) const {
1443      return isTop() ? SU->getHeight() : SU->getDepth();
1444    }
1445
1446    unsigned getResourceCount(unsigned ResIdx) const {
1447      return ExecutedResCounts[ResIdx];
1448    }
1449
1450    /// Get the scaled count of scheduled micro-ops and resources, including
1451    /// executed resources.
1452    unsigned getCriticalCount() const {
1453      if (!ZoneCritResIdx)
1454        return RetiredMOps * SchedModel->getMicroOpFactor();
1455      return getResourceCount(ZoneCritResIdx);
1456    }
1457
1458    /// Get a scaled count for the minimum execution time of the scheduled
1459    /// micro-ops that are ready to execute by getExecutedCount. Notice the
1460    /// feedback loop.
1461    unsigned getExecutedCount() const {
1462      return std::max(CurrCycle * SchedModel->getLatencyFactor(),
1463                      MaxExecutedResCount);
1464    }
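
    // Note on the scaled-count convention used by the getters above
    // (illustrative numbers, not from any real target): TargetSchedModel
    // scales all counts by a common LCM so that different resources compare
    // directly. If IssueWidth is 4 and a hypothetical "ALU" resource has 2
    // units, the LCM is 4, so getLatencyFactor() == 4, getMicroOpFactor()
    // == 1, and getResourceFactor(ALU) == 2. Eight retired micro-ops then
    // scale to 8 * 1 == 8 (two full cycles of issue), while 3 cycles on one
    // ALU unit scale to 3 * 2 == 6; dividing either count by
    // getLatencyFactor() converts it back to cycles.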
1465
1466    bool checkHazard(SUnit *SU);
1467
1468    unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
1469
1470    unsigned getOtherResourceCount(unsigned &OtherCritIdx);
1471
1472    void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone);
1473
1474    void releaseNode(SUnit *SU, unsigned ReadyCycle);
1475
1476    void bumpCycle(unsigned NextCycle);
1477
1478    void incExecutedResources(unsigned PIdx, unsigned Count);
1479
1480    unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
1481
1482    void bumpNode(SUnit *SU);
1483
1484    void releasePending();
1485
1486    void removeReady(SUnit *SU);
1487
1488    SUnit *pickOnlyChoice();
1489
1490#ifndef NDEBUG
1491    void dumpScheduledState();
1492#endif
1493  };
1494
1495private:
1496  ScheduleDAGMI *DAG;
1497  const TargetSchedModel *SchedModel;
1498  const TargetRegisterInfo *TRI;
1499
1500  // State of the top and bottom scheduled instruction boundaries.
1501  SchedRemainder Rem;
1502  SchedBoundary Top;
1503  SchedBoundary Bot;
1504
1505public:
1506  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
1507  enum {
1508    TopQID = 1,
1509    BotQID = 2,
1510    LogMaxQID = 2
1511  };
1512
1513  ConvergingScheduler():
1514    DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
1515
1516  virtual void initialize(ScheduleDAGMI *dag);
1517
1518  virtual SUnit *pickNode(bool &IsTopNode);
1519
1520  virtual void schedNode(SUnit *SU, bool IsTopNode);
1521
1522  virtual void releaseTopNode(SUnit *SU);
1523
1524  virtual void releaseBottomNode(SUnit *SU);
1525
1526  virtual void registerRoots();
1527
1528protected:
1529  void checkAcyclicLatency();
1530
1531  void tryCandidate(SchedCandidate &Cand,
1532                    SchedCandidate &TryCand,
1533                    SchedBoundary &Zone,
1534                    const RegPressureTracker &RPTracker,
1535                    RegPressureTracker &TempTracker);
1536
1537  SUnit *pickNodeBidirectional(bool &IsTopNode);
1538
1539  void pickNodeFromQueue(SchedBoundary &Zone,
1540                         const RegPressureTracker &RPTracker,
1541                         SchedCandidate &Candidate);
1542
1543  void reschedulePhysRegCopies(SUnit *SU, bool isTop);
1544
1545#ifndef NDEBUG
1546  void traceCandidate(const SchedCandidate &Cand);
1547#endif
1548};
1549} // namespace
1550
1551void ConvergingScheduler::SchedRemainder::
1552init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1553  reset();
1554  if (!SchedModel->hasInstrSchedModel())
1555    return;
1556  RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1557  for (std::vector<SUnit>::iterator
1558         I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1559    const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
1560    RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1561      * SchedModel->getMicroOpFactor();
1562    for (TargetSchedModel::ProcResIter
1563           PI = SchedModel->getWriteProcResBegin(SC),
1564           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1565      unsigned PIdx = PI->ProcResourceIdx;
1566      unsigned Factor = SchedModel->getResourceFactor(PIdx);
1567      RemainingCounts[PIdx] += (Factor * PI->Cycles);
1568    }
1569  }
1570}
1571
1572void ConvergingScheduler::SchedBoundary::
1573init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1574  reset();
1575  DAG = dag;
1576  SchedModel = smodel;
1577  Rem = rem;
1578  if (SchedModel->hasInstrSchedModel())
1579    ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1580}
1581
1582void ConvergingScheduler::initialize(ScheduleDAGMI *dag) {
1583  DAG = dag;
1584  SchedModel = DAG->getSchedModel();
1585  TRI = DAG->TRI;
1586
1587  Rem.init(DAG, SchedModel);
1588  Top.init(DAG, SchedModel, &Rem);
1589  Bot.init(DAG, SchedModel, &Rem);
1590
1591  // Initialize resource counts.
1592
1593  // Initialize the HazardRecognizers. If itineraries are absent, empty, or
1594  // disabled, then these HazardRecs will be disabled.
1595  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
1596  const TargetMachine &TM = DAG->MF.getTarget();
1597  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1598  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
1599
1600  assert((!ForceTopDown || !ForceBottomUp) &&
1601         "-misched-topdown incompatible with -misched-bottomup");
1602}
1603
1604void ConvergingScheduler::releaseTopNode(SUnit *SU) {
1605  if (SU->isScheduled)
1606    return;
1607
1608  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1609       I != E; ++I) {
1610    if (I->isWeak())
1611      continue;
1612    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
1613    unsigned Latency = I->getLatency();
1614#ifndef NDEBUG
1615    Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
1616#endif
1617    if (SU->TopReadyCycle < PredReadyCycle + Latency)
1618      SU->TopReadyCycle = PredReadyCycle + Latency;
1619  }
1620  Top.releaseNode(SU, SU->TopReadyCycle);
1621}
1622
1623void ConvergingScheduler::releaseBottomNode(SUnit *SU) {
1624  if (SU->isScheduled)
1625    return;
1626
1627  assert(SU->getInstr() && "Scheduled SUnit must have instr");
1628
1629  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1630       I != E; ++I) {
1631    if (I->isWeak())
1632      continue;
1633    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
1634    unsigned Latency = I->getLatency();
1635#ifndef NDEBUG
1636    Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
1637#endif
1638    if (SU->BotReadyCycle < SuccReadyCycle + Latency)
1639      SU->BotReadyCycle = SuccReadyCycle + Latency;
1640  }
1641  Bot.releaseNode(SU, SU->BotReadyCycle);
1642}
1643
1644/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
1645/// critical path by more cycles than it takes to drain the instruction buffer.
1646/// We estimate an upper bound on in-flight instructions as:
1647///
1648/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
1649/// InFlightIterations = AcyclicPath / CyclesPerIteration
1650/// InFlightResources = InFlightIterations * LoopResources
1651///
1652/// TODO: Check execution resources in addition to IssueCount.
1653void ConvergingScheduler::checkAcyclicLatency() {
1654  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
1655    return;
1656
1657  // Scaled number of cycles per loop iteration.
1658  unsigned IterCount =
1659    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
1660             Rem.RemIssueCount);
1661  // Scaled acyclic critical path.
1662  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
1663  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
1664  unsigned InFlightCount =
1665    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
1666  unsigned BufferLimit =
1667    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
1668
1669  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
1670
1671  DEBUG(dbgs() << "IssueCycles="
1672        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
1673        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
1674        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
1675        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
1676        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
1677        if (Rem.IsAcyclicLatencyLimited)
1678          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
1679}
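
// Worked example with invented numbers, assuming unit latency and micro-op
// factors: CyclicCritPath = 4c, CriticalPath = 40c, and RemIssueCount = 6
// micro-ops in the loop body give IterCount = max(4, 6) = 6 and
// AcyclicCount = 40. InFlightCount = ceil(40 * 6 / 6) = 40 micro-ops in
// flight, so with a 32-entry micro-op buffer (BufferLimit = 32) the loop is
// marked acyclic-latency limited and the policy will favor latency.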
1680
1681void ConvergingScheduler::registerRoots() {
1682  Rem.CriticalPath = DAG->ExitSU.getDepth();
1683
1684  // Some roots may not feed into ExitSU. Check all of them just in case.
1685  for (std::vector<SUnit*>::const_iterator
1686         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
1687    if ((*I)->getDepth() > Rem.CriticalPath)
1688      Rem.CriticalPath = (*I)->getDepth();
1689  }
1690  DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
1691
1692  if (EnableCyclicPath) {
1693    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
1694    checkAcyclicLatency();
1695  }
1696}
1697
1698/// Does this SU have a hazard within the current instruction group?
1699///
1700/// The scheduler supports two modes of hazard recognition. The first is the
1701/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1702/// supports highly complicated in-order reservation tables
1703/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1704///
1705/// The second is a streamlined mechanism that checks for hazards based on
1706/// simple counters that the scheduler itself maintains. It explicitly checks
1707/// for instruction dispatch limitations, including the number of micro-ops that
1708/// can dispatch per cycle.
1709///
1710/// TODO: Also check whether the SU must start a new group.
1711bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
1712  if (HazardRec->isEnabled())
1713    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
1714
1715  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1716  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1717    DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
1718          << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1719    return true;
1720  }
1721  return false;
1722}
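
// For example, with a hypothetical getIssueWidth() of 4 and CurrMOps == 3,
// an SU that expands to 2 micro-ops is reported as a hazard above
// (3 + 2 > 4) and waits in Pending, while a single micro-op SU still fits
// in the current issue group.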
1723
1724// Find the unscheduled node in ReadySUs with the highest latency.
1725unsigned ConvergingScheduler::SchedBoundary::
1726findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1727  SUnit *LateSU = 0;
1728  unsigned RemLatency = 0;
1729  for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
1730       I != E; ++I) {
1731    unsigned L = getUnscheduledLatency(*I);
1732    if (L > RemLatency) {
1733      RemLatency = L;
1734      LateSU = *I;
1735    }
1736  }
1737  if (LateSU) {
1738    DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1739          << LateSU->NodeNum << ") " << RemLatency << "c\n");
1740  }
1741  return RemLatency;
1742}
1743
1744// Count resources in this zone and the remaining unscheduled
1745// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1746// resource index, or zero if the zone is issue limited.
1747unsigned ConvergingScheduler::SchedBoundary::
1748getOtherResourceCount(unsigned &OtherCritIdx) {
1749  OtherCritIdx = 0;
1750  if (!SchedModel->hasInstrSchedModel())
1751    return 0;
1752
1753  unsigned OtherCritCount = Rem->RemIssueCount
1754    + (RetiredMOps * SchedModel->getMicroOpFactor());
1755  DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
1756        << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1757  for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1758       PIdx != PEnd; ++PIdx) {
1759    unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1760    if (OtherCount > OtherCritCount) {
1761      OtherCritCount = OtherCount;
1762      OtherCritIdx = PIdx;
1763    }
1764  }
1765  if (OtherCritIdx) {
1766    DEBUG(dbgs() << "  " << Available.getName() << " + Remain CritRes: "
1767          << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1768          << " " << getResourceName(OtherCritIdx) << "\n");
1769  }
1770  return OtherCritCount;
1771}
1772
1773/// Set the CandPolicy for this zone given the current resources and latencies
1774/// inside and outside the zone.
1775void ConvergingScheduler::SchedBoundary::setPolicy(CandPolicy &Policy,
1776                                                   SchedBoundary &OtherZone) {
1777  // Now that potential stalls have been considered, apply preemptive heuristics
1778  // based on the total latency and resources inside and outside this
1779  // zone.
1780
1781  // Compute remaining latency. We need this both to determine whether the
1782  // overall schedule has become latency-limited and whether the instructions
1783  // outside this zone are resource or latency limited.
1784  //
1785  // The "dependent" latency is updated incrementally during scheduling as the
1786  // max height/depth of scheduled nodes minus the cycles since it was
1787  // scheduled:
1788  //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
1789  //
1790  // The "independent" latency is the max ready queue depth:
1791  //   ILat = max N.depth for N in Available|Pending
1792  //
1793  // RemainingLatency is the greater of independent and dependent latency.
1794  unsigned RemLatency = DependentLatency;
1795  RemLatency = std::max(RemLatency, findMaxLatency(Available.elements()));
1796  RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements()));
1797
1798  // Compute the critical resource outside the zone.
1799  unsigned OtherCritIdx;
1800  unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
1801
1802  bool OtherResLimited = false;
1803  if (SchedModel->hasInstrSchedModel()) {
1804    unsigned LFactor = SchedModel->getLatencyFactor();
1805    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
1806  }
1807  if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) {
1808    Policy.ReduceLatency |= true;
1809    DEBUG(dbgs() << "  " << Available.getName() << " RemainingLatency "
1810          << RemLatency << " + " << CurrCycle << "c > CritPath "
1811          << Rem->CriticalPath << "\n");
1812  }
1813  // If the same resource is limiting inside and outside the zone, do nothing.
1814  if (ZoneCritResIdx == OtherCritIdx)
1815    return;
1816
1817  DEBUG(
1818    if (IsResourceLimited) {
1819      dbgs() << "  " << Available.getName() << " ResourceLimited: "
1820             << getResourceName(ZoneCritResIdx) << "\n";
1821    }
1822    if (OtherResLimited)
1823      dbgs() << "  RemainingLimit: " << getResourceName(OtherCritIdx) << "\n";
1824    if (!IsResourceLimited && !OtherResLimited)
1825      dbgs() << "  Latency limited both directions.\n");
1826
1827  if (IsResourceLimited && !Policy.ReduceResIdx)
1828    Policy.ReduceResIdx = ZoneCritResIdx;
1829
1830  if (OtherResLimited)
1831    Policy.DemandResIdx = OtherCritIdx;
1832}
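
// A numeric sketch of the OtherResLimited test above (invented values):
// with getLatencyFactor() == 2 and RemLatency == 10, the latency side
// scales to 20; an OtherCount of 25 exceeds it by 5, which is more than one
// cycle's worth (2), so the instructions outside this zone are treated as
// resource limited and their critical resource is demanded.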
1833
1834void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
1835                                                     unsigned ReadyCycle) {
1836  if (ReadyCycle < MinReadyCycle)
1837    MinReadyCycle = ReadyCycle;
1838
1839  // Check for interlocks first. For the purpose of other heuristics, an
1840  // instruction that cannot issue appears as if it's not in the ReadyQueue.
1841  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
1842  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU))
1843    Pending.push(SU);
1844  else
1845    Available.push(SU);
1846
1847  // Record this node as an immediate dependent of the scheduled node.
1848  NextSUs.insert(SU);
1849}
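
// For instance, under an in-order model (MicroOpBufferSize == 0, so
// IsBuffered above is false), a node with ReadyCycle 3 released while
// CurrCycle is 1 goes to Pending and stays invisible to the heuristics
// until the zone reaches cycle 3; under a buffered out-of-order model the
// same node goes straight to Available, on the assumption that the buffer
// absorbs the stall.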
1850
1851/// Move the boundary of scheduled code by one cycle.
1852void ConvergingScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
1853  if (SchedModel->getMicroOpBufferSize() == 0) {
1854    assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
1855    if (MinReadyCycle > NextCycle)
1856      NextCycle = MinReadyCycle;
1857  }
1858  // Update the current micro-ops, which will issue in the next cycle.
1859  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
1860  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
1861
1862  // Decrement DependentLatency based on the next cycle.
1863  if ((NextCycle - CurrCycle) > DependentLatency)
1864    DependentLatency = 0;
1865  else
1866    DependentLatency -= (NextCycle - CurrCycle);
1867
1868  if (!HazardRec->isEnabled()) {
1869    // Bypass HazardRec virtual calls.
1870    CurrCycle = NextCycle;
1871  }
1872  else {
1873    // Bypass getHazardType calls in case of long latency.
1874    for (; CurrCycle != NextCycle; ++CurrCycle) {
1875      if (isTop())
1876        HazardRec->AdvanceCycle();
1877      else
1878        HazardRec->RecedeCycle();
1879    }
1880  }
1881  CheckPending = true;
1882  unsigned LFactor = SchedModel->getLatencyFactor();
1883  IsResourceLimited =
1884    (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
1885    > (int)LFactor;
1886
1887  DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
1888}
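
// Example of the micro-op bookkeeping above, with a hypothetical issue
// width of 4: advancing from CurrCycle 7 to NextCycle 9 gives
// DecMOps = 4 * 2 = 8, so a CurrMOps of 5 drops to 0; DependentLatency
// likewise loses the 2 elapsed cycles, saturating at 0.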
1889
1890void ConvergingScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
1891                                                              unsigned Count) {
1892  ExecutedResCounts[PIdx] += Count;
1893  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
1894    MaxExecutedResCount = ExecutedResCounts[PIdx];
1895}
1896
1897/// Add the given processor resource to this scheduled zone.
1898///
1899/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
1900/// during which this resource is consumed.
1901///
1902/// \return the next cycle at which the instruction may execute without
1903/// oversubscribing resources.
1904unsigned ConvergingScheduler::SchedBoundary::
1905countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle) {
1906  unsigned Factor = SchedModel->getResourceFactor(PIdx);
1907  unsigned Count = Factor * Cycles;
1908  DEBUG(dbgs() << "  " << getResourceName(PIdx)
1909        << " +" << Cycles << "x" << Factor << "u\n");
1910
1911  // Update the executed resource counts.
1912  incExecutedResources(PIdx, Count);
1913  assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
1914  Rem->RemainingCounts[PIdx] -= Count;
1915
1916  // Check if this resource's count exceeds the current critical resource's
1917  // count. If so, this resource becomes the critical resource.
1918  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
1919    ZoneCritResIdx = PIdx;
1920    DEBUG(dbgs() << "  *** Critical resource "
1921          << getResourceName(PIdx) << ": "
1922          << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
1923  }
1924  // TODO: We don't yet model reserved resources. It's not hard though.
1925  return CurrCycle;
1926}
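
// Continuing the hypothetical factor example near getExecutedCount(): an
// instruction that occupies the 2-unit "ALU" for 3 cycles contributes
// Count = 2 * 3 = 6 scaled units here, and if that pushes the ALU's total
// past getCriticalCount(), the ALU becomes the zone's critical resource.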
1927
1928/// Move the boundary of scheduled code by one SUnit.
1929void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) {
1930  // Update the reservation table.
1931  if (HazardRec->isEnabled()) {
1932    if (!isTop() && SU->isCall) {
1933      // Calls are scheduled with their preceding instructions. For bottom-up
1934      // scheduling, clear the pipeline state before emitting.
1935      HazardRec->Reset();
1936    }
1937    HazardRec->EmitInstruction(SU);
1938  }
1939  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1940  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
1941  CurrMOps += IncMOps;
1942  // checkHazard prevents scheduling multiple instructions per cycle that exceed
1943  // the issue width. However, we commonly reach the maximum. In this case
1944  // opportunistically bump the cycle to avoid uselessly checking everything in
1945  // the readyQ. Furthermore, a single instruction may produce more than one
1946  // cycle's worth of micro-ops.
1947  //
1948  // TODO: Also check if this SU must end a dispatch group.
1949  unsigned NextCycle = CurrCycle;
1950  if (CurrMOps >= SchedModel->getIssueWidth()) {
1951    ++NextCycle;
1952    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
1953          << " at cycle " << CurrCycle << '\n');
1954  }
1955  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1956  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
1957
1958  switch (SchedModel->getMicroOpBufferSize()) {
1959  case 0:
1960    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
1961    break;
1962  case 1:
1963    if (ReadyCycle > NextCycle) {
1964      NextCycle = ReadyCycle;
1965      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
1966    }
1967    break;
1968  default:
1969    // We don't currently model the OOO reorder buffer, so consider all
1970    // scheduled MOps to be "retired".
1971    break;
1972  }
1973  RetiredMOps += IncMOps;
1974
1975  // Update resource counts and critical resource.
1976  if (SchedModel->hasInstrSchedModel()) {
1977    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
1978    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
1979    Rem->RemIssueCount -= DecRemIssue;
1980    if (ZoneCritResIdx) {
1981      // Scale scheduled micro-ops for comparing with the critical resource.
1982      unsigned ScaledMOps =
1983        RetiredMOps * SchedModel->getMicroOpFactor();
1984
1985      // If scaled micro-ops are now more than the previous critical resource by
1986      // a full cycle, then micro-ops issue becomes critical.
1987      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
1988          >= (int)SchedModel->getLatencyFactor()) {
1989        ZoneCritResIdx = 0;
1990        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
1991              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
1992      }
1993    }
1994    for (TargetSchedModel::ProcResIter
1995           PI = SchedModel->getWriteProcResBegin(SC),
1996           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1997      unsigned RCycle =
1998        countResource(PI->ProcResourceIdx, PI->Cycles, ReadyCycle);
1999      if (RCycle > NextCycle)
2000        NextCycle = RCycle;
2001    }
2002  }
2003  // Update ExpectedLatency and DependentLatency.
2004  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2005  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2006  if (SU->getDepth() > TopLatency) {
2007    TopLatency = SU->getDepth();
2008    DEBUG(dbgs() << "  " << Available.getName()
2009          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2010  }
2011  if (SU->getHeight() > BotLatency) {
2012    BotLatency = SU->getHeight();
2013    DEBUG(dbgs() << "  " << Available.getName()
2014          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2015  }
2016  // If we stall for any reason, bump the cycle.
2017  if (NextCycle > CurrCycle) {
2018    bumpCycle(NextCycle);
2019  }
2020  else {
2021    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2022    // resource limited. If a stall occurred, bumpCycle does this.
2023    unsigned LFactor = SchedModel->getLatencyFactor();
2024    IsResourceLimited =
2025      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2026      > (int)LFactor;
2027  }
2028  DEBUG(dumpScheduledState());
2029}
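
// For example, under a hypothetical model with MicroOpBufferSize == 1, an
// SU whose ReadyCycle is 6 while NextCycle is 4 forces the buffer-size
// switch above to stall the zone until cycle 6; with a larger buffer no
// stall is forced, on the assumption that the reorder buffer hides it.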
2030
2031/// Release pending ready nodes into the available queue. This makes them
2032/// visible to heuristics.
2033void ConvergingScheduler::SchedBoundary::releasePending() {
2034  // If the available queue is empty, it is safe to reset MinReadyCycle.
2035  if (Available.empty())
2036    MinReadyCycle = UINT_MAX;
2037
2038  // Check to see if any of the pending instructions are ready to issue.  If
2039  // so, add them to the available queue.
2040  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2041  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2042    SUnit *SU = *(Pending.begin()+i);
2043    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2044
2045    if (ReadyCycle < MinReadyCycle)
2046      MinReadyCycle = ReadyCycle;
2047
2048    if (!IsBuffered && ReadyCycle > CurrCycle)
2049      continue;
2050
2051    if (checkHazard(SU))
2052      continue;
2053
2054    Available.push(SU);
2055    Pending.remove(Pending.begin()+i);
2056    --i; --e;
2057  }
2058  DEBUG(if (!Pending.empty()) Pending.dump());
2059  CheckPending = false;
2060}
2061
2062/// Remove SU from the ready set for this boundary.
2063void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) {
2064  if (Available.isInQueue(SU))
2065    Available.remove(Available.find(SU));
2066  else {
2067    assert(Pending.isInQueue(SU) && "bad ready count");
2068    Pending.remove(Pending.find(SU));
2069  }
2070}
2071
2072/// If this queue only has one ready candidate, return it. As a side effect,
2073/// defer any nodes that now hit a hazard, and advance the cycle until at least
2074/// one node is ready. If multiple instructions are ready, return NULL.
2075SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
2076  if (CheckPending)
2077    releasePending();
2078
2079  if (CurrMOps > 0) {
2080    // Defer any ready instrs that now have a hazard.
2081    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2082      if (checkHazard(*I)) {
2083        Pending.push(*I);
2084        I = Available.remove(I);
2085        continue;
2086      }
2087      ++I;
2088    }
2089  }
2090  for (unsigned i = 0; Available.empty(); ++i) {
2091    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedLatency) &&
2092           "permanent hazard"); (void)i;
2093    bumpCycle(CurrCycle + 1);
2094    releasePending();
2095  }
2096  if (Available.size() == 1)
2097    return *Available.begin();
2098  return NULL;
2099}
2100
2101#ifndef NDEBUG
2102// This is useful information to dump after bumpNode.
2103// Note that the Queue contents are more useful before pickNodeFromQueue.
2104void ConvergingScheduler::SchedBoundary::dumpScheduledState() {
2105  unsigned ResFactor;
2106  unsigned ResCount;
2107  if (ZoneCritResIdx) {
2108    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2109    ResCount = getResourceCount(ZoneCritResIdx);
2110  }
2111  else {
2112    ResFactor = SchedModel->getMicroOpFactor();
2113    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2114  }
2115  unsigned LFactor = SchedModel->getLatencyFactor();
2116  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2117         << "  Retired: " << RetiredMOps;
2118  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2119  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2120         << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx)
2121         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2122         << (IsResourceLimited ? "  - Resource" : "  - Latency")
2123         << " limited.\n";
2124}
2125#endif
2126
2127void ConvergingScheduler::SchedCandidate::
2128initResourceDelta(const ScheduleDAGMI *DAG,
2129                  const TargetSchedModel *SchedModel) {
2130  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2131    return;
2132
2133  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2134  for (TargetSchedModel::ProcResIter
2135         PI = SchedModel->getWriteProcResBegin(SC),
2136         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2137    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2138      ResDelta.CritResources += PI->Cycles;
2139    if (PI->ProcResourceIdx == Policy.DemandResIdx)
2140      ResDelta.DemandedResources += PI->Cycles;
2141  }
2142}
2143
2144
2145/// Return true if this heuristic determines order.
2146static bool tryLess(int TryVal, int CandVal,
2147                    ConvergingScheduler::SchedCandidate &TryCand,
2148                    ConvergingScheduler::SchedCandidate &Cand,
2149                    ConvergingScheduler::CandReason Reason) {
2150  if (TryVal < CandVal) {
2151    TryCand.Reason = Reason;
2152    return true;
2153  }
2154  if (TryVal > CandVal) {
2155    if (Cand.Reason > Reason)
2156      Cand.Reason = Reason;
2157    return true;
2158  }
2159  Cand.setRepeat(Reason);
2160  return false;
2161}
2162
2163static bool tryGreater(int TryVal, int CandVal,
2164                       ConvergingScheduler::SchedCandidate &TryCand,
2165                       ConvergingScheduler::SchedCandidate &Cand,
2166                       ConvergingScheduler::CandReason Reason) {
2167  if (TryVal > CandVal) {
2168    TryCand.Reason = Reason;
2169    return true;
2170  }
2171  if (TryVal < CandVal) {
2172    if (Cand.Reason > Reason)
2173      Cand.Reason = Reason;
2174    return true;
2175  }
2176  Cand.setRepeat(Reason);
2177  return false;
2178}
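
// These two helpers implement a three-way protocol: they return true when
// the comparison decides the order, whether TryCand or Cand wins, and false
// on a tie so that the caller falls through to the next, lower-priority
// heuristic. A minimal sketch of a caller, mirroring tryLatency below:
//
// \code
//   if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
//               TryCand, Cand, ConvergingScheduler::BotHeightReduce))
//     return true; // Height decided the comparison either way.
//   // Otherwise fall through to the next heuristic...
// \endcode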
2179
2180static bool tryPressure(const PressureElement &TryP,
2181                        const PressureElement &CandP,
2182                        ConvergingScheduler::SchedCandidate &TryCand,
2183                        ConvergingScheduler::SchedCandidate &Cand,
2184                        ConvergingScheduler::CandReason Reason) {
2185  // If both candidates affect the same set, go with the smallest increase.
2186  if (TryP.PSetID == CandP.PSetID) {
2187    return tryLess(TryP.UnitIncrease, CandP.UnitIncrease, TryCand, Cand,
2188                   Reason);
2189  }
2190  // If one candidate decreases and the other increases, go with it.
2191  if (tryLess(TryP.UnitIncrease < 0, CandP.UnitIncrease < 0, TryCand, Cand,
2192              Reason)) {
2193    return true;
2194  }
2195  // If TryP has lower Rank, it has a higher priority.
2196  int TryRank = TryP.PSetRank();
2197  int CandRank = CandP.PSetRank();
2198  // If the candidates are decreasing pressure, reverse priority.
2199  if (TryP.UnitIncrease < 0)
2200    std::swap(TryRank, CandRank);
2201  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2202}
2203
2204static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2205  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2206}
2207
2208/// Minimize physical register live ranges. Regalloc wants them adjacent to
2209/// their physreg def/use.
2210///
2211/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2212/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2213/// with the operation that produces or consumes the physreg. We'll do this when
2214/// regalloc has support for parallel copies.
2215static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2216  const MachineInstr *MI = SU->getInstr();
2217  if (!MI->isCopy())
2218    return 0;
2219
2220  unsigned ScheduledOper = isTop ? 1 : 0;
2221  unsigned UnscheduledOper = isTop ? 0 : 1;
2222  // If we have already scheduled the physreg producer/consumer, immediately
2223  // schedule the copy.
2224  if (TargetRegisterInfo::isPhysicalRegister(
2225        MI->getOperand(ScheduledOper).getReg()))
2226    return 1;
2227  // If the physreg is at the boundary, defer it. Otherwise schedule it
2228  // immediately to free the dependent. We can hoist the copy later.
2229  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2230  if (TargetRegisterInfo::isPhysicalRegister(
2231        MI->getOperand(UnscheduledOper).getReg()))
2232    return AtBoundary ? -1 : 1;
2233  return 0;
2234}
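
// Example with hypothetical operands: when scheduling top-down, a copy such
// as %vreg5<def> = COPY %EAX has its physreg on the already-scheduled side
// (operand 1), so the function returns 1 and the copy issues immediately
// after the instruction defining %EAX. Bottom-up, %EAX<def> = COPY %vreg5
// behaves symmetrically through operand 0.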
2235
2236static bool tryLatency(ConvergingScheduler::SchedCandidate &TryCand,
2237                       ConvergingScheduler::SchedCandidate &Cand,
2238                       ConvergingScheduler::SchedBoundary &Zone) {
2239  if (Zone.isTop()) {
2240    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2241      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2242                  TryCand, Cand, ConvergingScheduler::TopDepthReduce))
2243        return true;
2244    }
2245    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2246                   TryCand, Cand, ConvergingScheduler::TopPathReduce))
2247      return true;
2248  }
2249  else {
2250    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2251      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2252                  TryCand, Cand, ConvergingScheduler::BotHeightReduce))
2253        return true;
2254    }
2255    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2256                   TryCand, Cand, ConvergingScheduler::BotPathReduce))
2257      return true;
2258  }
2259  return false;
2260}
2261
2262/// Apply a set of heuristics to a new candidate. Heuristics are currently
2263/// hierarchical. This may be more efficient than a graduated cost model because
2264/// we don't need to evaluate all aspects of the model for each node in the
2265/// queue. But it's really done to make the heuristics easier to debug and
2266/// statistically analyze.
2267///
2268/// \param Cand provides the policy and current best candidate.
2269/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2270/// \param Zone describes the scheduled zone that we are extending.
2271/// \param RPTracker describes reg pressure within the scheduled zone.
2272/// \param TempTracker is a scratch pressure tracker to reuse in queries.
2273void ConvergingScheduler::tryCandidate(SchedCandidate &Cand,
2274                                       SchedCandidate &TryCand,
2275                                       SchedBoundary &Zone,
2276                                       const RegPressureTracker &RPTracker,
2277                                       RegPressureTracker &TempTracker) {
2278
2279  // Always initialize TryCand's RPDelta.
2280  TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta,
2281                                  DAG->getRegionCriticalPSets(),
2282                                  DAG->getRegPressure().MaxSetPressure);
2283
2284  // Initialize the candidate if needed.
2285  if (!Cand.isValid()) {
2286    TryCand.Reason = NodeOrder;
2287    return;
2288  }
2289
2290  if (tryGreater(biasPhysRegCopy(TryCand.SU, Zone.isTop()),
2291                 biasPhysRegCopy(Cand.SU, Zone.isTop()),
2292                 TryCand, Cand, PhysRegCopy))
2293    return;
2294
2295  // Avoid exceeding the target's limit. If signed PSetID is negative, it is
2296  // invalid; convert it to INT_MAX to give it lowest priority.
2297  if (tryPressure(TryCand.RPDelta.Excess, Cand.RPDelta.Excess, TryCand, Cand,
2298                  RegExcess))
2299    return;
2300
2301  // For loops that are acyclic path limited, aggressively schedule for latency.
2302  if (Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, Zone))
2303    return;
2304
2305  // Avoid increasing the max critical pressure in the scheduled region.
2306  if (tryPressure(TryCand.RPDelta.CriticalMax, Cand.RPDelta.CriticalMax,
2307                  TryCand, Cand, RegCritical))
2308    return;
2309
2310  // Keep clustered nodes together to encourage downstream peephole
2311  // optimizations which may reduce resource requirements.
2312  //
2313  // This is a best effort to set things up for a post-RA pass. Optimizations
2314  // like generating loads of multiple registers should ideally be done within
2315  // the scheduler pass by combining the loads during DAG postprocessing.
2316  const SUnit *NextClusterSU =
2317    Zone.isTop() ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2318  if (tryGreater(TryCand.SU == NextClusterSU, Cand.SU == NextClusterSU,
2319                 TryCand, Cand, Cluster))
2320    return;
2321
2322  // Weak edges are for clustering and other constraints.
2323  if (tryLess(getWeakLeft(TryCand.SU, Zone.isTop()),
2324              getWeakLeft(Cand.SU, Zone.isTop()),
2325              TryCand, Cand, Weak)) {
2326    return;
2327  }
2328  // Avoid increasing the max pressure of the entire region.
2329  if (tryPressure(TryCand.RPDelta.CurrentMax, Cand.RPDelta.CurrentMax,
2330                  TryCand, Cand, RegMax))
2331    return;
2332
2333  // Avoid critical resource consumption and balance the schedule.
2334  TryCand.initResourceDelta(DAG, SchedModel);
2335  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2336              TryCand, Cand, ResourceReduce))
2337    return;
2338  if (tryGreater(TryCand.ResDelta.DemandedResources,
2339                 Cand.ResDelta.DemandedResources,
2340                 TryCand, Cand, ResourceDemand))
2341    return;
2342
2343  // Avoid serializing long latency dependence chains.
2344  // For acyclic path limited loops, latency was already checked above.
2345  if (Cand.Policy.ReduceLatency && !Rem.IsAcyclicLatencyLimited
2346      && tryLatency(TryCand, Cand, Zone)) {
2347    return;
2348  }
2349
2350  // Prefer immediate defs/users of the last scheduled instruction. This is a
2351  // local pressure avoidance strategy that also makes the machine code
2352  // readable.
2353  if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
2354                 TryCand, Cand, NextDefUse))
2355    return;
2356
2357  // Fall through to original instruction order.
2358  if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2359      || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2360    TryCand.Reason = NodeOrder;
2361  }
2362}
2363
2364#ifndef NDEBUG
2365const char *ConvergingScheduler::getReasonStr(
2366  ConvergingScheduler::CandReason Reason) {
2367  switch (Reason) {
2368  case NoCand:         return "NOCAND    ";
2369  case PhysRegCopy:    return "PREG-COPY";
2370  case RegExcess:      return "REG-EXCESS";
2371  case RegCritical:    return "REG-CRIT  ";
2372  case Cluster:        return "CLUSTER   ";
2373  case Weak:           return "WEAK      ";
2374  case RegMax:         return "REG-MAX   ";
2375  case ResourceReduce: return "RES-REDUCE";
2376  case ResourceDemand: return "RES-DEMAND";
2377  case TopDepthReduce: return "TOP-DEPTH ";
2378  case TopPathReduce:  return "TOP-PATH  ";
2379  case BotHeightReduce:return "BOT-HEIGHT";
2380  case BotPathReduce:  return "BOT-PATH  ";
2381  case NextDefUse:     return "DEF-USE   ";
2382  case NodeOrder:      return "ORDER     ";
2383  }
2384  llvm_unreachable("Unknown reason!");
2385}
2386
2387void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand) {
2388  PressureElement P;
2389  unsigned ResIdx = 0;
2390  unsigned Latency = 0;
2391  switch (Cand.Reason) {
2392  default:
2393    break;
2394  case RegExcess:
2395    P = Cand.RPDelta.Excess;
2396    break;
2397  case RegCritical:
2398    P = Cand.RPDelta.CriticalMax;
2399    break;
2400  case RegMax:
2401    P = Cand.RPDelta.CurrentMax;
2402    break;
2403  case ResourceReduce:
2404    ResIdx = Cand.Policy.ReduceResIdx;
2405    break;
2406  case ResourceDemand:
2407    ResIdx = Cand.Policy.DemandResIdx;
2408    break;
2409  case TopDepthReduce:
2410    Latency = Cand.SU->getDepth();
2411    break;
2412  case TopPathReduce:
2413    Latency = Cand.SU->getHeight();
2414    break;
2415  case BotHeightReduce:
2416    Latency = Cand.SU->getHeight();
2417    break;
2418  case BotPathReduce:
2419    Latency = Cand.SU->getDepth();
2420    break;
2421  }
2422  dbgs() << "  SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2423  if (P.isValid())
2424    dbgs() << " " << TRI->getRegPressureSetName(P.PSetID)
2425           << ":" << P.UnitIncrease << " ";
2426  else
2427    dbgs() << "      ";
2428  if (ResIdx)
2429    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2430  else
2431    dbgs() << "         ";
2432  if (Latency)
2433    dbgs() << " " << Latency << " cycles ";
2434  else
2435    dbgs() << "          ";
2436  dbgs() << '\n';
2437}
2438#endif
2439
2440/// Pick the best candidate from the top queue.
2441///
2442/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2443/// DAG building. To adjust for the current scheduling location we need to
2444/// maintain the number of vreg uses remaining to be top-scheduled.
2445void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2446                                            const RegPressureTracker &RPTracker,
2447                                            SchedCandidate &Cand) {
2448  ReadyQueue &Q = Zone.Available;
2449
2450  DEBUG(Q.dump());
2451
2452  // getMaxPressureDelta temporarily modifies the tracker.
2453  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2454
2455  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
2456
2457    SchedCandidate TryCand(Cand.Policy);
2458    TryCand.SU = *I;
2459    tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker);
2460    if (TryCand.Reason != NoCand) {
2461      // Initialize resource delta if needed in case future heuristics query it.
2462      if (TryCand.ResDelta == SchedResourceDelta())
2463        TryCand.initResourceDelta(DAG, SchedModel);
2464      Cand.setBest(TryCand);
2465      DEBUG(traceCandidate(Cand));
2466    }
2467  }
2468}
2469
2470static void tracePick(const ConvergingScheduler::SchedCandidate &Cand,
2471                      bool IsTop) {
2472  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2473        << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n');
2474}
2475
2476/// Pick the best candidate node from either the top or bottom queue.
2477SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) {
2478  // Schedule as far as possible in the direction of no choice. This is most
2479  // efficient, but also provides the best heuristics for CriticalPSets.
2480  if (SUnit *SU = Bot.pickOnlyChoice()) {
2481    IsTopNode = false;
2482    DEBUG(dbgs() << "Pick Bot NOCAND\n");
2483    return SU;
2484  }
2485  if (SUnit *SU = Top.pickOnlyChoice()) {
2486    IsTopNode = true;
2487    DEBUG(dbgs() << "Pick Top NOCAND\n");
2488    return SU;
2489  }
2490  CandPolicy NoPolicy;
2491  SchedCandidate BotCand(NoPolicy);
2492  SchedCandidate TopCand(NoPolicy);
2493  Bot.setPolicy(BotCand.Policy, Top);
2494  Top.setPolicy(TopCand.Policy, Bot);
2495
2496  // Prefer bottom scheduling when heuristics are silent.
2497  pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2498  assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2499
2500  // If either Q has a single candidate that provides the least increase in
2501  // Excess pressure, we can immediately schedule from that Q.
2502  //
2503  // RegionCriticalPSets summarizes the pressure within the scheduled region and
2504  // affects picking from either Q. If scheduling in one direction must
2505  // increase pressure for one of the excess PSets, then schedule in that
2506  // direction first to provide more freedom in the other direction.
2507  if ((BotCand.Reason == RegExcess && !BotCand.isRepeat(RegExcess))
2508      || (BotCand.Reason == RegCritical
2509          && !BotCand.isRepeat(RegCritical)))
2510  {
2511    IsTopNode = false;
2512    tracePick(BotCand, IsTopNode);
2513    return BotCand.SU;
2514  }
2515  // Check if the top Q has a better candidate.
2516  pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2517  assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2518
2519  // Choose the queue with the most important (lowest enum) reason.
2520  if (TopCand.Reason < BotCand.Reason) {
2521    IsTopNode = true;
2522    tracePick(TopCand, IsTopNode);
2523    return TopCand.SU;
2524  }
2525  // Otherwise prefer the bottom candidate, in node order if all else failed.
2526  IsTopNode = false;
2527  tracePick(BotCand, IsTopNode);
2528  return BotCand.SU;
2529}
2530
2531/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
2532SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) {
2533  if (DAG->top() == DAG->bottom()) {
2534    assert(Top.Available.empty() && Top.Pending.empty() &&
2535           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
2536    return NULL;
2537  }
2538  SUnit *SU;
2539  do {
2540    if (ForceTopDown) {
2541      SU = Top.pickOnlyChoice();
2542      if (!SU) {
2543        CandPolicy NoPolicy;
2544        SchedCandidate TopCand(NoPolicy);
2545        pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
2546        assert(TopCand.Reason != NoCand && "failed to find the first candidate");
2547        SU = TopCand.SU;
2548      }
2549      IsTopNode = true;
2550    }
2551    else if (ForceBottomUp) {
2552      SU = Bot.pickOnlyChoice();
2553      if (!SU) {
2554        CandPolicy NoPolicy;
2555        SchedCandidate BotCand(NoPolicy);
2556        pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
2557        assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2558        SU = BotCand.SU;
2559      }
2560      IsTopNode = false;
2561    }
2562    else {
2563      SU = pickNodeBidirectional(IsTopNode);
2564    }
2565  } while (SU->isScheduled);
2566
2567  if (SU->isTopReady())
2568    Top.removeReady(SU);
2569  if (SU->isBottomReady())
2570    Bot.removeReady(SU);
2571
2572  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
2573  return SU;
2574}
2575
2576void ConvergingScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
2577
2578  MachineBasicBlock::iterator InsertPos = SU->getInstr();
2579  if (!isTop)
2580    ++InsertPos;
2581  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
2582
2583  // Find already scheduled copies with a single physreg dependence and move
2584  // them adjacent to the scheduled instruction.
2585  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
2586       I != E; ++I) {
2587    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
2588      continue;
2589    SUnit *DepSU = I->getSUnit();
2590    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
2591      continue;
2592    MachineInstr *Copy = DepSU->getInstr();
2593    if (!Copy->isCopy())
2594      continue;
2595    DEBUG(dbgs() << "  Rescheduling physreg copy ";
2596          I->getSUnit()->dump(DAG));
2597    DAG->moveInstruction(Copy, InsertPos);
2598  }
2599}
2600
2601/// Update the scheduler's state after scheduling a node. This is the same node
2602/// that was just returned by pickNode(). However, ScheduleDAGMI needs to update
2603/// its state based on the current cycle before MachineSchedStrategy does.
2604///
2605/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
2606/// them here. See comments in biasPhysRegCopy.
2607void ConvergingScheduler::schedNode(SUnit *SU, bool IsTopNode) {
2608  if (IsTopNode) {
2609    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle);
2610    Top.bumpNode(SU);
2611    if (SU->hasPhysRegUses)
2612      reschedulePhysRegCopies(SU, true);
2613  }
2614  else {
2615    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle);
2616    Bot.bumpNode(SU);
2617    if (SU->hasPhysRegDefs)
2618      reschedulePhysRegCopies(SU, false);
2619  }
2620}
2621
2622/// Create the standard converging machine scheduler. This will be used as the
2623/// default scheduler if the target does not set a default.
2624static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
2625  assert((!ForceTopDown || !ForceBottomUp) &&
2626         "-misched-topdown incompatible with -misched-bottomup");
2627  ScheduleDAGMI *DAG = new ScheduleDAGMI(C, new ConvergingScheduler());
2628  // Register DAG post-processors.
2629  //
2630  // FIXME: extend the mutation API to allow earlier mutations to instantiate
2631  // data and pass it to later mutations. Have a single mutation that gathers
2632  // the interesting nodes in one pass.
2633  DAG->addMutation(new CopyConstrain(DAG->TII, DAG->TRI));
2634  if (EnableLoadCluster)
2635    DAG->addMutation(new LoadClusterMutation(DAG->TII, DAG->TRI));
2636  if (EnableMacroFusion)
2637    DAG->addMutation(new MacroFusion(DAG->TII));
2638  return DAG;
2639}
2640static MachineSchedRegistry
2641ConvergingSchedRegistry("converge", "Standard converging scheduler.",
2642                        createConvergingSched);
2643
2644//===----------------------------------------------------------------------===//
2645// ILP Scheduler. Currently for experimental analysis of heuristics.
2646//===----------------------------------------------------------------------===//
2647
2648namespace {
2649/// \brief Order nodes by the ILP metric.
2650struct ILPOrder {
2651  const SchedDFSResult *DFSResult;
2652  const BitVector *ScheduledTrees;
2653  bool MaximizeILP;
2654
2655  ILPOrder(bool MaxILP): DFSResult(0), ScheduledTrees(0), MaximizeILP(MaxILP) {}
2656
2657  /// \brief Apply a less-than relation on node priority.
2658  ///
2659  /// (Return true if A comes after B in the Q.)
2660  bool operator()(const SUnit *A, const SUnit *B) const {
2661    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
2662    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
2663    if (SchedTreeA != SchedTreeB) {
2664      // Unscheduled trees have lower priority.
2665      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
2666        return ScheduledTrees->test(SchedTreeB);
2667
2668      // Trees with shallower connections have lower priority.
2669      if (DFSResult->getSubtreeLevel(SchedTreeA)
2670          != DFSResult->getSubtreeLevel(SchedTreeB)) {
2671        return DFSResult->getSubtreeLevel(SchedTreeA)
2672          < DFSResult->getSubtreeLevel(SchedTreeB);
2673      }
2674    }
2675    if (MaximizeILP)
2676      return DFSResult->getILP(A) < DFSResult->getILP(B);
2677    else
2678      return DFSResult->getILP(A) > DFSResult->getILP(B);
2679  }
2680};
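
/// Example of the comparator's orientation, with hypothetical ILP values:
/// with MaximizeILP set and two nodes in the same subtree where
/// getILP(A) == 2 and getILP(B) == 5, operator() returns true, meaning A
/// sorts after B, so a heap built with this comparator pops the higher-ILP
/// node B first.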
2681
2682/// \brief Schedule based on the ILP metric.
2683class ILPScheduler : public MachineSchedStrategy {
2684  /// In case all subtrees are eventually connected to a common root through
2685  /// data dependence (e.g. reduction), place an upper limit on their size.
2686  ///
2687  /// FIXME: A subtree limit is generally good, but in the situation commented
2688  /// above, where multiple similar subtrees feed a common root, we should
2689  /// only split at a point where the resulting subtrees will be balanced.
2690  /// (a motivating test case must be found).
2691  static const unsigned SubtreeLimit = 16;
2692
2693  ScheduleDAGMI *DAG;
2694  ILPOrder Cmp;
2695
2696  std::vector<SUnit*> ReadyQ;
2697public:
2698  ILPScheduler(bool MaximizeILP): DAG(0), Cmp(MaximizeILP) {}
2699
2700  virtual void initialize(ScheduleDAGMI *dag) {
2701    DAG = dag;
2702    DAG->computeDFSResult();
2703    Cmp.DFSResult = DAG->getDFSResult();
2704    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
2705    ReadyQ.clear();
2706  }
2707
2708  virtual void registerRoots() {
2709    // Restore the heap in ReadyQ with the updated DFS results.
2710    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2711  }
2712
2713  /// Implement MachineSchedStrategy interface.
2714  /// -----------------------------------------
2715
2716  /// Callback to select the highest priority node from the ready Q.
2717  virtual SUnit *pickNode(bool &IsTopNode) {
2718    if (ReadyQ.empty()) return NULL;
2719    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2720    SUnit *SU = ReadyQ.back();
2721    ReadyQ.pop_back();
2722    IsTopNode = false;
2723    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
2724          << " ILP: " << DAG->getDFSResult()->getILP(SU)
2725          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
2726          << DAG->getDFSResult()->getSubtreeLevel(
2727            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
2728          << "Scheduling " << *SU->getInstr());
2729    return SU;
2730  }
2731
2732  /// \brief Scheduler callback to notify that a new subtree is scheduled.
2733  virtual void scheduleTree(unsigned SubtreeID) {
2734    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2735  }
2736
2737  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
2738  /// DFSResults, and resort the priority Q.
2739  virtual void schedNode(SUnit *SU, bool IsTopNode) {
2740    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
2741  }
2742
2743  virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ }
2744
2745  virtual void releaseBottomNode(SUnit *SU) {
2746    ReadyQ.push_back(SU);
2747    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
2748  }
2749};
2750} // namespace
2751
2752static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
2753  return new ScheduleDAGMI(C, new ILPScheduler(true));
2754}
2755static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
2756  return new ScheduleDAGMI(C, new ILPScheduler(false));
2757}
2758static MachineSchedRegistry ILPMaxRegistry(
2759  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
2760static MachineSchedRegistry ILPMinRegistry(
2761  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
2762
2763//===----------------------------------------------------------------------===//
2764// Machine Instruction Shuffler for Correctness Testing
2765//===----------------------------------------------------------------------===//
2766
2767#ifndef NDEBUG
2768namespace {
2769/// Apply a less-than relation on the node order, which corresponds to the
2770/// instruction order prior to scheduling. IsReverse implements greater-than.
2771template<bool IsReverse>
2772struct SUnitOrder {
2773  bool operator()(SUnit *A, SUnit *B) const {
2774    if (IsReverse)
2775      return A->NodeNum > B->NodeNum;
2776    else
2777      return A->NodeNum < B->NodeNum;
2778  }
2779};
2780
2781/// Reorder instructions as much as possible.
2782class InstructionShuffler : public MachineSchedStrategy {
2783  bool IsAlternating;
2784  bool IsTopDown;
2785
2786  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
2787  // gives nodes with a higher number higher priority, causing the latest
2788  // instructions to be scheduled first.
2789  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
2790    TopQ;
2791  // When scheduling bottom-up, use greater-than as the queue priority.
2792  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
2793    BottomQ;
2794public:
2795  InstructionShuffler(bool alternate, bool topdown)
2796    : IsAlternating(alternate), IsTopDown(topdown) {}
2797
2798  virtual void initialize(ScheduleDAGMI *) {
2799    TopQ.clear();
2800    BottomQ.clear();
2801  }
2802
2803  /// Implement MachineSchedStrategy interface.
2804  /// -----------------------------------------
2805
2806  virtual SUnit *pickNode(bool &IsTopNode) {
2807    SUnit *SU;
2808    if (IsTopDown) {
2809      do {
2810        if (TopQ.empty()) return NULL;
2811        SU = TopQ.top();
2812        TopQ.pop();
2813      } while (SU->isScheduled);
2814      IsTopNode = true;
2815    }
2816    else {
2817      do {
2818        if (BottomQ.empty()) return NULL;
2819        SU = BottomQ.top();
2820        BottomQ.pop();
2821      } while (SU->isScheduled);
2822      IsTopNode = false;
2823    }
2824    if (IsAlternating)
2825      IsTopDown = !IsTopDown;
2826    return SU;
2827  }
2828
2829  virtual void schedNode(SUnit *SU, bool IsTopNode) {}
2830
2831  virtual void releaseTopNode(SUnit *SU) {
2832    TopQ.push(SU);
2833  }
2834  virtual void releaseBottomNode(SUnit *SU) {
2835    BottomQ.push(SU);
2836  }
2837};
2838} // namespace
2839
2840static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
2841  bool Alternate = !ForceTopDown && !ForceBottomUp;
2842  bool TopDown = !ForceBottomUp;
2843  assert((TopDown || !ForceTopDown) &&
2844         "-misched-topdown incompatible with -misched-bottomup");
2845  return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
2846}
2847static MachineSchedRegistry ShufflerRegistry(
2848  "shuffle", "Shuffle machine instructions alternating directions",
2849  createInstructionShuffler);
2850#endif // !NDEBUG
2851
2852//===----------------------------------------------------------------------===//
2853// GraphWriter support for ScheduleDAGMI.
2854//===----------------------------------------------------------------------===//
2855
2856#ifndef NDEBUG
2857namespace llvm {
2858
2859template<> struct GraphTraits<
2860  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
2861
2862template<>
2863struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
2864
2865  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
2866
2867  static std::string getGraphName(const ScheduleDAG *G) {
2868    return G->MF.getName();
2869  }
2870
2871  static bool renderGraphFromBottomUp() {
2872    return true;
2873  }
2874
2875  static bool isNodeHidden(const SUnit *Node) {
2876    return (Node->NumPreds > 10 || Node->NumSuccs > 10);
2877  }
2878
2879  static bool hasNodeAddressLabel(const SUnit *Node,
2880                                  const ScheduleDAG *Graph) {
2881    return false;
2882  }
2883
2884  /// If you want to override the dot attributes printed for a particular
2885  /// edge, override this method.
2886  static std::string getEdgeAttributes(const SUnit *Node,
2887                                       SUnitIterator EI,
2888                                       const ScheduleDAG *Graph) {
2889    if (EI.isArtificialDep())
2890      return "color=cyan,style=dashed";
2891    if (EI.isCtrlDep())
2892      return "color=blue,style=dashed";
2893    return "";
2894  }
2895
2896  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
2897    std::string Str;
2898    raw_string_ostream SS(Str);
2899    SS << "SU(" << SU->NodeNum << ')';
2900    return SS.str();
2901  }
2902  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
2903    return G->getGraphNodeLabel(SU);
2904  }
2905
2906  static std::string getNodeAttributes(const SUnit *N,
2907                                       const ScheduleDAG *Graph) {
2908    std::string Str("shape=Mrecord");
2909    const SchedDFSResult *DFS =
2910      static_cast<const ScheduleDAGMI*>(Graph)->getDFSResult();
2911    if (DFS) {
2912      Str += ",style=filled,fillcolor=\"#";
2913      Str += DOT::getColorString(DFS->getSubtreeID(N));
2914      Str += '"';
2915    }
2916    return Str;
2917  }
2918};
2919} // namespace llvm
2920#endif // NDEBUG
2921
2922/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
2923/// rendered using 'dot'.
2924///
2925void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
2926#ifndef NDEBUG
2927  ViewGraph(this, Name, false, Title);
2928#else
2929  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
2930         << "systems with Graphviz or gv!\n";
2931#endif  // NDEBUG
2932}
2933
2934/// Out-of-line implementation with no arguments is handy for gdb.
2935void ScheduleDAGMI::viewGraph() {
2936  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
2937}
2938