PostRASchedulerList.cpp revision cf9aa284b332bc2613def3612b80c5883d4b9985
//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
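// The pass proper (PostRAScheduler) walks each basic block, splits it into
// regions at scheduling boundaries, and hands each region to the
// SchedulePostRATDList scheduler, which builds the dependence graph,
// optionally breaks anti-dependencies, and then list-schedules top-down
// against the target's post-RA hazard recognizer.
//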
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                       cl::desc("Enable scheduling after register allocation"),
                       cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
                      cl::desc("Debug control MBBs that are scheduled"),
                      cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
                      cl::desc("Debug control MBBs that are scheduled"),
                      cl::init(0), cl::Hidden);
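
// For example, when code generation is driven through a tool such as llc,
// these flags can be combined to force post-RA scheduling and select a
// breaker (illustrative invocation only):
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical foo.bc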

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation).  Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    std::vector<unsigned> KillIndices;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, TargetSubtarget::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, TargetSubtarget::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), AA(AA),
    KillIndices(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
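  // Select the anti-dependence breaker that matches the requested mode:
  // ANTIDEP_ALL gets the aggressive breaker (renaming over the given
  // critical-path register classes), ANTIDEP_CRITICAL gets the
  // critical-path-only breaker, and ANTIDEP_NONE leaves AntiDepBreak NULL
  // so that breaking is skipped entirely.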
  AntiDepBreak =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
        (EnableAntiDepBreaking == "critical")
           ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
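    // The block is scanned bottom-up; each time a scheduling boundary is
    // found, the instructions after it (up to the previous boundary) form a
    // region that is scheduled and emitted, while the boundary instruction
    // itself is only passed to Observe() so liveness tracking stays accurate.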
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
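  // Live-out registers are recorded with kill index BB->size(); the specific
  // value is not significant here, only that it differs from ~0u (not live).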
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, clear the kill flag on MO and add an imp-def
  // operand for each live subreg; restore the kill flag only if all subregs
  // turn out to be dead.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness.  Registers that are defed but not used in this
    // instruction are now dead. Mark the register and all of its subregs,
    // since they are completely redefined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node, which
  // causes all descendants to be marked dirty. Setting the successor depth
  // explicitly here would cause depth to be recomputed for all its ancestors.
  // If the successor is not yet ready (because of a transitively redundant
  // edge) then this causes depth computation to be quadratic in the size of
  // the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor's pending count reaches zero, add
/// it to the PendingQueue; it moves to the Available queue once its cycle is
/// reached.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
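  // Each pass through the loop below either issues one instruction in the
  // current cycle or, when nothing can issue, advances the cycle (emitting a
  // noop or recording a stall when required).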
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue.  If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right.  This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}

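// A target's code-generation pipeline typically adds this pass with something
// along the lines of PM.add(createPostRAScheduler(OptLevel)); the exact call
// site (in the common LLVMTargetMachine pass setup at this revision) is not
// shown here.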