PostRASchedulerList.cpp revision 4c3715c2e5e17d7216a96ac2baf9720630f04408
1//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements a top-down list scheduler, using standard algorithms.
11// The basic approach uses a priority queue of available nodes to schedule.
12// One at a time, nodes are taken from the priority queue (thus in priority
13// order), checked for legality to schedule, and emitted if legal.
14//
15// Nodes may not be legal to schedule either due to structural hazards (e.g.
16// pipeline or resource constraints) or because an input to the instruction has
17// not completed execution.
18//
19//===----------------------------------------------------------------------===//
20
21#define DEBUG_TYPE "post-RA-sched"
22#include "ExactHazardRecognizer.h"
23#include "SimpleHazardRecognizer.h"
24#include "ScheduleDAGInstrs.h"
25#include "llvm/CodeGen/Passes.h"
26#include "llvm/CodeGen/LatencyPriorityQueue.h"
27#include "llvm/CodeGen/SchedulerRegistry.h"
28#include "llvm/CodeGen/MachineDominators.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineFunctionPass.h"
31#include "llvm/CodeGen/MachineLoopInfo.h"
32#include "llvm/CodeGen/MachineRegisterInfo.h"
33#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
34#include "llvm/Analysis/AliasAnalysis.h"
35#include "llvm/Target/TargetLowering.h"
36#include "llvm/Target/TargetMachine.h"
37#include "llvm/Target/TargetInstrInfo.h"
38#include "llvm/Target/TargetRegisterInfo.h"
39#include "llvm/Target/TargetSubtarget.h"
40#include "llvm/Support/Compiler.h"
41#include "llvm/Support/Debug.h"
42#include "llvm/Support/ErrorHandling.h"
43#include "llvm/Support/raw_ostream.h"
44#include "llvm/ADT/Statistic.h"
45#include <map>
46#include <set>
47using namespace llvm;
48
49STATISTIC(NumNoops, "Number of noops inserted");
50STATISTIC(NumStalls, "Number of pipeline stalls");
51
52// Post-RA scheduling is enabled with
53// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
54// override the target.
55static cl::opt<bool>
56EnablePostRAScheduler("post-RA-scheduler",
57                       cl::desc("Enable scheduling after register allocation"),
58                       cl::init(false), cl::Hidden);
59static cl::opt<bool>
60EnableAntiDepBreaking("break-anti-dependencies",
61                      cl::desc("Break post-RA scheduling anti-dependencies"),
62                      cl::init(true), cl::Hidden);
63static cl::opt<bool>
64EnablePostRAHazardAvoidance("avoid-hazards",
65                      cl::desc("Enable exact hazard avoidance"),
66                      cl::init(true), cl::Hidden);
67
68// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
69static cl::opt<int>
70DebugDiv("postra-sched-debugdiv",
71                      cl::desc("Debug control MBBs that are scheduled"),
72                      cl::init(0), cl::Hidden);
73static cl::opt<int>
74DebugMod("postra-sched-debugmod",
75                      cl::desc("Debug control MBBs that are scheduled"),
76                      cl::init(0), cl::Hidden);
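// Illustrative use of the two options above (effective only in builds with
// assertions enabled, since the check below is under #ifndef NDEBUG): for
// example, -postra-sched-debugdiv=8 -postra-sched-debugmod=3 restricts
// post-RA scheduling to blocks whose visit count modulo 8 equals 3, which
// helps bisect a scheduling-related failure down to a single block.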
77
78namespace {
79  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
80    AliasAnalysis *AA;
81    CodeGenOpt::Level OptLevel;
82
83  public:
84    static char ID;
85    PostRAScheduler(CodeGenOpt::Level ol) :
86      MachineFunctionPass(&ID), OptLevel(ol) {}
87
88    void getAnalysisUsage(AnalysisUsage &AU) const {
89      AU.setPreservesCFG();
90      AU.addRequired<AliasAnalysis>();
91      AU.addRequired<MachineDominatorTree>();
92      AU.addPreserved<MachineDominatorTree>();
93      AU.addRequired<MachineLoopInfo>();
94      AU.addPreserved<MachineLoopInfo>();
95      MachineFunctionPass::getAnalysisUsage(AU);
96    }
97
98    const char *getPassName() const {
99      return "Post RA top-down list latency scheduler";
100    }
101
102    bool runOnMachineFunction(MachineFunction &Fn);
103  };
104  char PostRAScheduler::ID = 0;
105
106  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
107    /// AvailableQueue - The priority queue to use for the available SUnits.
108    ///
109    LatencyPriorityQueue AvailableQueue;
110
111    /// PendingQueue - This contains all of the instructions whose operands have
112    /// been issued, but their results are not ready yet (due to the latency of
113    /// the operation).  Once the operands become available, the instruction is
114    /// added to the AvailableQueue.
115    std::vector<SUnit*> PendingQueue;
116
117    /// Topo - A topological ordering for SUnits.
118    ScheduleDAGTopologicalSort Topo;
119
120    /// AllocatableSet - The set of allocatable registers.
121    /// We'll be ignoring anti-dependencies on non-allocatable registers,
122    /// because they may not be safe to break.
123    const BitVector AllocatableSet;
124
125    /// HazardRec - The hazard recognizer to use.
126    ScheduleHazardRecognizer *HazardRec;
127
128    /// AA - AliasAnalysis for making memory reference queries.
129    AliasAnalysis *AA;
130
131    /// AntiDepMode - Anti-dependence breaking mode
132    TargetSubtarget::AntiDepBreakMode AntiDepMode;
133
134    /// Classes - For live regs that are only used in one register class in a
135    /// live range, the register class. If the register is not live, the
136    /// corresponding value is null. If the register is live but used in
137    /// multiple register classes, the corresponding value is -1 casted to a
138    /// pointer.
139    const TargetRegisterClass *
140      Classes[TargetRegisterInfo::FirstVirtualRegister];
141
142    /// RegRefs - Map registers to all their references within a live range.
143    std::multimap<unsigned, MachineOperand *> RegRefs;
144
145    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
146    /// or ~0u if the register is not live.
147    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
148
149    /// DefIndices - The index of the most recent complete def (proceeding bottom
150    /// up), or ~0u if the register is live.
151    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
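    /// Note that KillIndices and DefIndices are kept complementary: for any
    /// physical register, exactly one of the two entries is ~0u at a time
    /// (a live register has a valid kill index, a dead one a valid def
    /// index). The asserts in ScanInstruction and BreakAntiDependencies
    /// below check this invariant.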
152
153    /// KeepRegs - A set of registers which are live and cannot be changed to
154    /// break anti-dependencies.
155    SmallSet<unsigned, 4> KeepRegs;
156
157  public:
158    SchedulePostRATDList(MachineFunction &MF,
159                         const MachineLoopInfo &MLI,
160                         const MachineDominatorTree &MDT,
161                         ScheduleHazardRecognizer *HR,
162                         AliasAnalysis *aa,
163                         TargetSubtarget::AntiDepBreakMode adm)
164      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
165        AllocatableSet(TRI->getAllocatableSet(MF)),
166        HazardRec(HR), AA(aa), AntiDepMode(adm) {}
167
168    ~SchedulePostRATDList() {
169      delete HazardRec;
170    }
171
172    /// StartBlock - Initialize register live-range state for scheduling in
173    /// this block.
174    ///
175    void StartBlock(MachineBasicBlock *BB);
176
177    /// Schedule - Schedule the instruction range using list scheduling.
178    ///
179    void Schedule();
180
181    /// FixupKills - Fix register kill flags that have been made
182    /// invalid due to scheduling
183    ///
184    void FixupKills(MachineBasicBlock *MBB);
185
186    /// Observe - Update liveness information to account for the current
187    /// instruction, which will not be scheduled.
188    ///
189    void Observe(MachineInstr *MI, unsigned Count);
190
191    /// FinishBlock - Clean up register live-range state.
192    ///
193    void FinishBlock();
194
195  private:
196    void PrescanInstruction(MachineInstr *MI);
197    void ScanInstruction(MachineInstr *MI, unsigned Count);
198    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
199    void ReleaseSuccessors(SUnit *SU);
200    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
201    void ListScheduleTopDown();
202    bool BreakAntiDependencies();
203    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
204                                      unsigned LastNewReg,
205                                      const TargetRegisterClass *);
206    void StartBlockForKills(MachineBasicBlock *BB);
207
208    // ToggleKillFlag - Toggle a register operand kill flag. Other
209    // adjustments may be made to the instruction if necessary. Return
210    // true if the operand has been deleted, false if not.
211    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
212  };
213}
214
215/// isSchedulingBoundary - Test if the given instruction should be
216/// considered a scheduling boundary. This primarily includes labels
217/// and terminators.
218///
219static bool isSchedulingBoundary(const MachineInstr *MI,
220                                 const MachineFunction &MF) {
221  // Terminators and labels can't be scheduled around.
222  if (MI->getDesc().isTerminator() || MI->isLabel())
223    return true;
224
225  // Don't attempt to schedule around any instruction that modifies
226  // a stack-oriented pointer, as it's unlikely to be profitable. This
227  // saves compile time, because it doesn't require every single
228  // stack slot reference to depend on the instruction that does the
229  // modification.
230  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
231  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
232    return true;
233
234  return false;
235}
236
237bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
238  AA = &getAnalysis<AliasAnalysis>();
239
240  // Check for explicit enable/disable of post-ra scheduling.
241  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
242  if (EnablePostRAScheduler.getPosition() > 0) {
243    if (!EnablePostRAScheduler)
244      return false;
245  } else {
246    // Check that post-RA scheduling is enabled for this target.
247    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
248    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
249      return false;
250  }
251
252  // Check for antidep breaking override...
253  if (EnableAntiDepBreaking.getPosition() > 0) {
254    AntiDepMode = (EnableAntiDepBreaking) ?
255      TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
256  }
257
258  DEBUG(errs() << "PostRAScheduler\n");
259
260  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
261  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
262  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
263  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
264    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
265    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
266
267  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA, AntiDepMode);
268
269  // Loop over all of the basic blocks
270  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
271       MBB != MBBe; ++MBB) {
272#ifndef NDEBUG
273    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
274    if (DebugDiv > 0) {
275      static int bbcnt = 0;
276      if (bbcnt++ % DebugDiv != DebugMod)
277        continue;
278      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
279        ":MBB ID#" << MBB->getNumber() << " ***\n";
280    }
281#endif
282
283    // Initialize register live-range state for scheduling in this block.
284    Scheduler.StartBlock(MBB);
285
286    // Schedule each sequence of instructions not interrupted by a label
287    // or anything else that effectively needs to shut down scheduling.
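    // The walk below proceeds bottom-up. Each time a boundary is found, the
    // region between it and Current is scheduled and emitted, the boundary
    // instruction itself is only Observe'd (so its liveness effects are
    // still recorded), and Current is reset to the boundary. The final
    // region, from the top of the block down to Current, is handled after
    // the loop.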
288    MachineBasicBlock::iterator Current = MBB->end();
289    unsigned Count = MBB->size(), CurrentCount = Count;
290    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
291      MachineInstr *MI = prior(I);
292      if (isSchedulingBoundary(MI, Fn)) {
293        Scheduler.Run(MBB, I, Current, CurrentCount);
294        Scheduler.EmitSchedule(0);
295        Current = MI;
296        CurrentCount = Count - 1;
297        Scheduler.Observe(MI, CurrentCount);
298      }
299      I = MI;
300      --Count;
301    }
302    assert(Count == 0 && "Instruction count mismatch!");
303    assert((MBB->begin() == Current || CurrentCount != 0) &&
304           "Instruction count mismatch!");
305    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
306    Scheduler.EmitSchedule(0);
307
308    // Clean up register live-range state.
309    Scheduler.FinishBlock();
310
311    // Update register kills
312    Scheduler.FixupKills(MBB);
313  }
314
315  return true;
316}
317
318/// StartBlock - Initialize register live-range state for scheduling in
319/// this block.
320///
321void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
322  // Call the superclass.
323  ScheduleDAGInstrs::StartBlock(BB);
324
325  // Reset the hazard recognizer.
326  HazardRec->Reset();
327
328  // Clear out the register class data.
329  std::fill(Classes, array_endof(Classes),
330            static_cast<const TargetRegisterClass *>(0));
331
332  // Initialize the indices to indicate that no registers are live.
333  std::fill(KillIndices, array_endof(KillIndices), ~0u);
334  std::fill(DefIndices, array_endof(DefIndices), BB->size());
335
336  // Clear "do not change" set.
337  KeepRegs.clear();
338
339  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());
340
341  // Determine the live-out physregs for this block.
342  if (IsReturnBlock) {
343    // In a return block, examine the function live-out regs.
344    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
345         E = MRI.liveout_end(); I != E; ++I) {
346      unsigned Reg = *I;
347      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
348      KillIndices[Reg] = BB->size();
349      DefIndices[Reg] = ~0u;
350      // Repeat, for all aliases.
351      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
352        unsigned AliasReg = *Alias;
353        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
354        KillIndices[AliasReg] = BB->size();
355        DefIndices[AliasReg] = ~0u;
356      }
357    }
358  } else {
359    // In a non-return block, examine the live-in regs of all successors.
360    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
361         SE = BB->succ_end(); SI != SE; ++SI)
362      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
363           E = (*SI)->livein_end(); I != E; ++I) {
364        unsigned Reg = *I;
365        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
366        KillIndices[Reg] = BB->size();
367        DefIndices[Reg] = ~0u;
368        // Repeat, for all aliases.
369        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
370          unsigned AliasReg = *Alias;
371          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
372          KillIndices[AliasReg] = BB->size();
373          DefIndices[AliasReg] = ~0u;
374        }
375      }
376  }
377
378  // Mark live-out callee-saved registers. In a return block this is
379  // all callee-saved registers. In a non-return block this is any
380  // callee-saved register that is not saved in the prolog.
381  const MachineFrameInfo *MFI = MF.getFrameInfo();
382  BitVector Pristine = MFI->getPristineRegs(BB);
383  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
384    unsigned Reg = *I;
385    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
386    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
387    KillIndices[Reg] = BB->size();
388    DefIndices[Reg] = ~0u;
389    // Repeat, for all aliases.
390    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
391      unsigned AliasReg = *Alias;
392      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
393      KillIndices[AliasReg] = BB->size();
394      DefIndices[AliasReg] = ~0u;
395    }
396  }
397}
398
399/// Schedule - Schedule the instruction range using list scheduling.
400///
401void SchedulePostRATDList::Schedule() {
402  DEBUG(errs() << "********** List Scheduling **********\n");
403
404  // Build the scheduling graph.
405  BuildSchedGraph(AA);
406
407  if (AntiDepMode != TargetSubtarget::ANTIDEP_NONE) {
408    if (BreakAntiDependencies()) {
409      // We made changes. Update the dependency graph.
410      // Theoretically we could update the graph in place:
411      // When a live range is changed to use a different register, remove
412      // the def's anti-dependence *and* output-dependence edges due to
413      // that register, and add new anti-dependence and output-dependence
414      // edges based on the next live range of the register.
415      SUnits.clear();
416      EntrySU = SUnit();
417      ExitSU = SUnit();
418      BuildSchedGraph(AA);
419    }
420  }
421
422  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
423          SUnits[su].dumpAll(this));
424
425  AvailableQueue.initNodes(SUnits);
426
427  ListScheduleTopDown();
428
429  AvailableQueue.releaseState();
430}
431
432/// Observe - Update liveness information to account for the current
433/// instruction, which will not be scheduled.
434///
435void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
436  assert(Count < InsertPosIndex && "Instruction index out of expected range!");
437
438  // Any register which was defined within the previous scheduling region
439  // may have been rescheduled and its lifetime may overlap with registers
440  // in ways not reflected in our current liveness state. For each such
441  // register, adjust the liveness state to be conservatively correct.
442  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
443    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
444      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
445      // Mark this register to be non-renamable.
446      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
447      // Move the def index to the end of the previous region, to reflect
448      // that the def could theoretically have been scheduled at the end.
449      DefIndices[Reg] = InsertPosIndex;
450    }
451
452  PrescanInstruction(MI);
453  ScanInstruction(MI, Count);
454}
455
456/// FinishBlock - Clean up register live-range state.
457///
458void SchedulePostRATDList::FinishBlock() {
459  RegRefs.clear();
460
461  // Call the superclass.
462  ScheduleDAGInstrs::FinishBlock();
463}
464
465/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
466/// critical path.
467static SDep *CriticalPathStep(SUnit *SU) {
468  SDep *Next = 0;
469  unsigned NextDepth = 0;
470  // Find the predecessor edge with the greatest depth.
471  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
472       P != PE; ++P) {
473    SUnit *PredSU = P->getSUnit();
474    unsigned PredLatency = P->getLatency();
475    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
476    // In the case of a latency tie, prefer an anti-dependency edge over
477    // other types of edges.
478    if (NextDepth < PredTotalLatency ||
479        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
480      NextDepth = PredTotalLatency;
481      Next = &*P;
482    }
483  }
484  return Next;
485}
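// Starting from the maximum-depth SUnit and repeatedly following the edge
// returned here walks the critical path from the bottom of the region toward
// the top; BreakAntiDependencies uses this to pick which anti-dependence
// edge, if any, to attack at each instruction.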
486
487void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
488  // Scan the register operands for this instruction and update
489  // Classes and RegRefs.
490  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
491    MachineOperand &MO = MI->getOperand(i);
492    if (!MO.isReg()) continue;
493    unsigned Reg = MO.getReg();
494    if (Reg == 0) continue;
495    const TargetRegisterClass *NewRC = 0;
496
497    if (i < MI->getDesc().getNumOperands())
498      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);
499
500    // For now, only allow the register to be changed if its register
501    // class is consistent across all uses.
502    if (!Classes[Reg] && NewRC)
503      Classes[Reg] = NewRC;
504    else if (!NewRC || Classes[Reg] != NewRC)
505      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
506
507    // Now check for aliases.
508    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
509      // If an alias of the reg is used during the live range, give up.
510      // Note that this allows us to skip checking if AntiDepReg
511      // overlaps with any of the aliases, among other things.
512      unsigned AliasReg = *Alias;
513      if (Classes[AliasReg]) {
514        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
515        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
516      }
517    }
518
519    // If we're still willing to consider this register, note the reference.
520    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
521      RegRefs.insert(std::make_pair(Reg, &MO));
522
523    // It's not safe to change register allocation for source operands of
524    // instructions that have special allocation requirements.
525    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
526      if (KeepRegs.insert(Reg)) {
527        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
528             *Subreg; ++Subreg)
529          KeepRegs.insert(*Subreg);
530      }
531    }
532  }
533}
534
535void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
536                                           unsigned Count) {
537  // Update liveness.
538  // Proceeding upwards, registers that are defined but not used in this
539  // instruction are now dead.
540  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
541    MachineOperand &MO = MI->getOperand(i);
542    if (!MO.isReg()) continue;
543    unsigned Reg = MO.getReg();
544    if (Reg == 0) continue;
545    if (!MO.isDef()) continue;
546    // Ignore two-addr defs.
547    if (MI->isRegTiedToUseOperand(i)) continue;
548
549    DefIndices[Reg] = Count;
550    KillIndices[Reg] = ~0u;
551    assert(((KillIndices[Reg] == ~0u) !=
552            (DefIndices[Reg] == ~0u)) &&
553           "Kill and Def maps aren't consistent for Reg!");
554    KeepRegs.erase(Reg);
555    Classes[Reg] = 0;
556    RegRefs.erase(Reg);
557    // Repeat, for all subregs.
558    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
559         *Subreg; ++Subreg) {
560      unsigned SubregReg = *Subreg;
561      DefIndices[SubregReg] = Count;
562      KillIndices[SubregReg] = ~0u;
563      KeepRegs.erase(SubregReg);
564      Classes[SubregReg] = 0;
565      RegRefs.erase(SubregReg);
566    }
567    // Conservatively mark super-registers as unusable.
568    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
569         *Super; ++Super) {
570      unsigned SuperReg = *Super;
571      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
572    }
573  }
574  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
575    MachineOperand &MO = MI->getOperand(i);
576    if (!MO.isReg()) continue;
577    unsigned Reg = MO.getReg();
578    if (Reg == 0) continue;
579    if (!MO.isUse()) continue;
580
581    const TargetRegisterClass *NewRC = 0;
582    if (i < MI->getDesc().getNumOperands())
583      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);
584
585    // For now, only allow the register to be changed if its register
586    // class is consistent across all uses.
587    if (!Classes[Reg] && NewRC)
588      Classes[Reg] = NewRC;
589    else if (!NewRC || Classes[Reg] != NewRC)
590      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
591
592    RegRefs.insert(std::make_pair(Reg, &MO));
593
594    // It wasn't previously live but now it is; this is a kill.
595    if (KillIndices[Reg] == ~0u) {
596      KillIndices[Reg] = Count;
597      DefIndices[Reg] = ~0u;
598      assert(((KillIndices[Reg] == ~0u) !=
599              (DefIndices[Reg] == ~0u)) &&
600             "Kill and Def maps aren't consistent for Reg!");
601    }
602    // Repeat, for all aliases.
603    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
604      unsigned AliasReg = *Alias;
605      if (KillIndices[AliasReg] == ~0u) {
606        KillIndices[AliasReg] = Count;
607        DefIndices[AliasReg] = ~0u;
608      }
609    }
610  }
611}
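// Note the ordering above: defs are scanned before uses, so proceeding
// bottom-up a register defined here first has its liveness cleared, and a
// register used here then becomes live, with the use recorded as the kill
// point if the register was not already live below this instruction.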
612
613unsigned
614SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
615                                               unsigned LastNewReg,
616                                               const TargetRegisterClass *RC) {
617  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
618       RE = RC->allocation_order_end(MF); R != RE; ++R) {
619    unsigned NewReg = *R;
620    // Don't replace a register with itself.
621    if (NewReg == AntiDepReg) continue;
622    // Don't replace a register with one that was recently used to repair
623    // an anti-dependence with this AntiDepReg, because that would
624    // re-introduce that anti-dependence.
625    if (NewReg == LastNewReg) continue;
626    // If NewReg is dead and NewReg's most recent def is not before
627    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
628    assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u)) &&
629           "Kill and Def maps aren't consistent for AntiDepReg!");
630    assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u)) &&
631           "Kill and Def maps aren't consistent for NewReg!");
632    if (KillIndices[NewReg] != ~0u ||
633        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
634        KillIndices[AntiDepReg] > DefIndices[NewReg])
635      continue;
636    return NewReg;
637  }
638
639  // No registers are free and available!
640  return 0;
641}
642
643/// BreakAntiDependencies - Identify anti-dependencies along the critical path
644/// of the ScheduleDAG and break them by renaming registers.
645///
646bool SchedulePostRATDList::BreakAntiDependencies() {
647  // The code below assumes that there is at least one instruction,
648  // so just duck out immediately if the block is empty.
649  if (SUnits.empty()) return false;
650
651  // Find the node at the bottom of the critical path.
652  SUnit *Max = 0;
653  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
654    SUnit *SU = &SUnits[i];
655    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
656      Max = SU;
657  }
658
659#ifndef NDEBUG
660  {
661    DEBUG(errs() << "Critical path has total latency "
662          << (Max->getDepth() + Max->Latency) << "\n");
663    DEBUG(errs() << "Available regs:");
664    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
665      if (KillIndices[Reg] == ~0u)
666        DEBUG(errs() << " " << TRI->getName(Reg));
667    }
668    DEBUG(errs() << '\n');
669  }
670#endif
671
672  // Track progress along the critical path through the SUnit graph as we walk
673  // the instructions.
674  SUnit *CriticalPathSU = Max;
675  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
676
677  // Consider this pattern:
678  //   A = ...
679  //   ... = A
680  //   A = ...
681  //   ... = A
682  //   A = ...
683  //   ... = A
684  //   A = ...
685  //   ... = A
686  // There are three anti-dependencies here, and without special care,
687  // we'd break all of them using the same register:
688  //   A = ...
689  //   ... = A
690  //   B = ...
691  //   ... = B
692  //   B = ...
693  //   ... = B
694  //   B = ...
695  //   ... = B
696  // because at each anti-dependence, B is the first register that
697  // isn't A which is free.  This re-introduces anti-dependencies
698  // at all but one of the original anti-dependencies that we were
699  // trying to break.  To avoid this, keep track of the most recent
700  // register that each register was replaced with, and avoid
701  // using it to repair an anti-dependence on the same register.
702  // This lets us produce this:
703  //   A = ...
704  //   ... = A
705  //   B = ...
706  //   ... = B
707  //   C = ...
708  //   ... = C
709  //   B = ...
710  //   ... = B
711  // This still has an anti-dependence on B, but at least it isn't on the
712  // original critical path.
713  //
714  // TODO: If we tracked more than one register here, we could potentially
715  // fix that remaining critical edge too. This is a little more involved,
716  // because unlike the most recent register, less recent registers should
717  // still be considered, though only if no other registers are available.
718  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
719
720  // Attempt to break anti-dependence edges on the critical path. Walk the
721  // instructions from the bottom up, tracking information about liveness
722  // as we go to help determine which registers are available.
723  bool Changed = false;
724  unsigned Count = InsertPosIndex - 1;
725  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
726       I != E; --Count) {
727    MachineInstr *MI = --I;
728
729    // Check if this instruction has a dependence on the critical path that
730    // is an anti-dependence that we may be able to break. If it is, set
731    // AntiDepReg to the non-zero register associated with the anti-dependence.
732    //
733    // We limit our attention to the critical path as a heuristic to avoid
734    // breaking anti-dependence edges that aren't going to significantly
735    // impact the overall schedule. There are a limited number of registers
736    // and we want to save them for the important edges.
737    //
738    // TODO: Instructions with multiple defs could have multiple
739    // anti-dependencies. The current code here only knows how to break one
740    // edge per instruction. Note that we'd have to be able to break all of
741    // the anti-dependencies in an instruction in order to be effective.
742    unsigned AntiDepReg = 0;
743    if (MI == CriticalPathMI) {
744      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
745        SUnit *NextSU = Edge->getSUnit();
746
747        // Only consider anti-dependence edges.
748        if (Edge->getKind() == SDep::Anti) {
749          AntiDepReg = Edge->getReg();
750          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
751          if (!AllocatableSet.test(AntiDepReg))
752            // Don't break anti-dependencies on non-allocatable registers.
753            AntiDepReg = 0;
754          else if (KeepRegs.count(AntiDepReg))
755            // Don't break anti-dependencies if a use down below requires
756            // this exact register.
757            AntiDepReg = 0;
758          else {
759            // If the SUnit has other dependencies on the SUnit that it
760            // anti-depends on, don't bother breaking the anti-dependency
761            // since those edges would prevent such units from being
762            // scheduled past each other regardless.
763            //
764            // Also, if there are dependencies on other SUnits with the
765            // same register as the anti-dependency, don't attempt to
766            // break it.
767            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
768                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
769              if (P->getSUnit() == NextSU ?
770                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
771                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
772                AntiDepReg = 0;
773                break;
774              }
775          }
776        }
777        CriticalPathSU = NextSU;
778        CriticalPathMI = CriticalPathSU->getInstr();
779      } else {
780        // We've reached the end of the critical path.
781        CriticalPathSU = 0;
782        CriticalPathMI = 0;
783      }
784    }
785
786    PrescanInstruction(MI);
787
788    if (MI->getDesc().hasExtraDefRegAllocReq())
789      // If this instruction's defs have special allocation requirements, don't
790      // break this anti-dependency.
791      AntiDepReg = 0;
792    else if (AntiDepReg) {
793      // If this instruction has a use of AntiDepReg, breaking it
794      // is invalid.
795      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
796        MachineOperand &MO = MI->getOperand(i);
797        if (!MO.isReg()) continue;
798        unsigned Reg = MO.getReg();
799        if (Reg == 0) continue;
800        if (MO.isUse() && AntiDepReg == Reg) {
801          AntiDepReg = 0;
802          break;
803        }
804      }
805    }
806
807    // Determine AntiDepReg's register class, if it is live and is
808    // consistently used within a single class.
809    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
810    assert((AntiDepReg == 0 || RC != NULL) &&
811           "Register should be live if it's causing an anti-dependence!");
812    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
813      AntiDepReg = 0;
814
815    // Look for a suitable register to use to break the anti-dependence.
816    //
817    // TODO: Instead of picking the first free register, consider which might
818    // be the best.
819    if (AntiDepReg != 0) {
820      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
821                                                     LastNewReg[AntiDepReg],
822                                                     RC)) {
823        DEBUG(errs() << "Breaking anti-dependence edge on "
824              << TRI->getName(AntiDepReg)
825              << " with " << RegRefs.count(AntiDepReg) << " references"
826              << " using " << TRI->getName(NewReg) << "!\n");
827
828        // Update the references to the old register to refer to the new
829        // register.
830        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
831                  std::multimap<unsigned, MachineOperand *>::iterator>
832           Range = RegRefs.equal_range(AntiDepReg);
833        for (std::multimap<unsigned, MachineOperand *>::iterator
834             Q = Range.first, QE = Range.second; Q != QE; ++Q)
835          Q->second->setReg(NewReg);
836
837        // We just went back in time and modified history; the
838        // liveness information for the anti-dependence reg is now
839        // inconsistent. Set the state as if it were dead.
840        Classes[NewReg] = Classes[AntiDepReg];
841        DefIndices[NewReg] = DefIndices[AntiDepReg];
842        KillIndices[NewReg] = KillIndices[AntiDepReg];
843        assert(((KillIndices[NewReg] == ~0u) !=
844                (DefIndices[NewReg] == ~0u)) &&
845             "Kill and Def maps aren't consistent for NewReg!");
846
847        Classes[AntiDepReg] = 0;
848        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
849        KillIndices[AntiDepReg] = ~0u;
850        assert(((KillIndices[AntiDepReg] == ~0u) !=
851                (DefIndices[AntiDepReg] == ~0u)) &&
852             "Kill and Def maps aren't consistent for AntiDepReg!");
853
854        RegRefs.erase(AntiDepReg);
855        Changed = true;
856        LastNewReg[AntiDepReg] = NewReg;
857      }
858    }
859
860    ScanInstruction(MI, Count);
861  }
862
863  return Changed;
864}
865
866/// StartBlockForKills - Initialize register live-range state for updating kills
867///
868void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
869  // Initialize the indices to indicate that no registers are live.
870  std::fill(KillIndices, array_endof(KillIndices), ~0u);
871
872  // Determine the live-out physregs for this block.
873  if (!BB->empty() && BB->back().getDesc().isReturn()) {
874    // In a return block, examine the function live-out regs.
875    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
876           E = MRI.liveout_end(); I != E; ++I) {
877      unsigned Reg = *I;
878      KillIndices[Reg] = BB->size();
879      // Repeat, for all subregs.
880      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
881           *Subreg; ++Subreg) {
882        KillIndices[*Subreg] = BB->size();
883      }
884    }
885  }
886  else {
887    // In a non-return block, examine the live-in regs of all successors.
888    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
889           SE = BB->succ_end(); SI != SE; ++SI) {
890      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
891             E = (*SI)->livein_end(); I != E; ++I) {
892        unsigned Reg = *I;
893        KillIndices[Reg] = BB->size();
894        // Repeat, for all subregs.
895        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
896             *Subreg; ++Subreg) {
897          KillIndices[*Subreg] = BB->size();
898        }
899      }
900    }
901  }
902}
903
904bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
905                                          MachineOperand &MO) {
906  // Setting kill flag...
907  if (!MO.isKill()) {
908    MO.setIsKill(true);
909    return false;
910  }
911
912  // If MO itself is live, clear the kill flag...
913  if (KillIndices[MO.getReg()] != ~0u) {
914    MO.setIsKill(false);
915    return false;
916  }
917
918  // If any subreg of MO is live, then create an imp-def for that
919  // subreg; MO's kill flag is restored below only if all subregs are dead.
920  MO.setIsKill(false);
921  bool AllDead = true;
922  const unsigned SuperReg = MO.getReg();
923  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
924       *Subreg; ++Subreg) {
925    if (KillIndices[*Subreg] != ~0u) {
926      MI->addOperand(MachineOperand::CreateReg(*Subreg,
927                                               true  /*IsDef*/,
928                                               true  /*IsImp*/,
929                                               false /*IsKill*/,
930                                               false /*IsDead*/));
931      AllDead = false;
932    }
933  }
934
935  if (AllDead)
936    MO.setIsKill(true);
937  return false;
938}
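// Note that every path above returns false: ToggleKillFlag only sets or
// clears kill flags (and may append implicit subregister defs); it never
// deletes the operand, so callers currently never see a true return value.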
939
940/// FixupKills - Fix the register kill flags; they may have been made
941/// incorrect by instruction reordering.
942///
943void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
944  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');
945
946  std::set<unsigned> killedRegs;
947  BitVector ReservedRegs = TRI->getReservedRegs(MF);
948
949  StartBlockForKills(MBB);
950
951  // Examine block from end to start...
952  unsigned Count = MBB->size();
953  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
954       I != E; --Count) {
955    MachineInstr *MI = --I;
956
957    // Update liveness.  Registers that are defined but not used in this
958    // instruction are now dead. Mark register and all subregs as they
959    // are completely defined.
960    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
961      MachineOperand &MO = MI->getOperand(i);
962      if (!MO.isReg()) continue;
963      unsigned Reg = MO.getReg();
964      if (Reg == 0) continue;
965      if (!MO.isDef()) continue;
966      // Ignore two-addr defs.
967      if (MI->isRegTiedToUseOperand(i)) continue;
968
969      KillIndices[Reg] = ~0u;
970
971      // Repeat for all subregs.
972      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
973           *Subreg; ++Subreg) {
974        KillIndices[*Subreg] = ~0u;
975      }
976    }
977
978    // Examine all used registers and set/clear kill flag. When a
979    // register is used multiple times we only set the kill flag on
980    // the first use.
981    killedRegs.clear();
982    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
983      MachineOperand &MO = MI->getOperand(i);
984      if (!MO.isReg() || !MO.isUse()) continue;
985      unsigned Reg = MO.getReg();
986      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
987
988      bool kill = false;
989      if (killedRegs.find(Reg) == killedRegs.end()) {
990        kill = true;
991        // A register is not killed if any subregs are live...
992        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
993             *Subreg; ++Subreg) {
994          if (KillIndices[*Subreg] != ~0u) {
995            kill = false;
996            break;
997          }
998        }
999
1000        // If no subreg is live, then the register is killed by this use
1001        // if it is not already live below this instruction.
1002        if (kill)
1003          kill = (KillIndices[Reg] == ~0u);
1004      }
1005
1006      if (MO.isKill() != kill) {
1007        bool removed = ToggleKillFlag(MI, MO);
1008        if (removed) {
1009          DEBUG(errs() << "Fixed <removed> in ");
1010        } else {
1011          DEBUG(errs() << "Fixed " << MO << " in ");
1012        }
1013        DEBUG(MI->dump());
1014      }
1015
1016      killedRegs.insert(Reg);
1017    }
1018
1019    // Mark any used register (that is not marked undef) and its subregs
1020    // as now live...
1021    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1022      MachineOperand &MO = MI->getOperand(i);
1023      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
1024      unsigned Reg = MO.getReg();
1025      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
1026
1027      KillIndices[Reg] = Count;
1028
1029      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
1030           *Subreg; ++Subreg) {
1031        KillIndices[*Subreg] = Count;
1032      }
1033    }
1034  }
1035}
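// In effect, FixupKills re-derives kill flags by walking each block
// bottom-up: defs clear liveness, the bottom-most use of a register that is
// not live below it gets the kill flag, and any earlier use of the same
// register has a stale kill flag cleared.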
1036
1037//===----------------------------------------------------------------------===//
1038//  Top-Down Scheduling
1039//===----------------------------------------------------------------------===//
1040
1041/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1042/// the PendingQueue if the count reaches zero. Also update its cycle bound.
1043void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
1044  SUnit *SuccSU = SuccEdge->getSUnit();
1045
1046#ifndef NDEBUG
1047  if (SuccSU->NumPredsLeft == 0) {
1048    errs() << "*** Scheduling failed! ***\n";
1049    SuccSU->dump(this);
1050    errs() << " has been released too many times!\n";
1051    llvm_unreachable(0);
1052  }
1053#endif
1054  --SuccSU->NumPredsLeft;
1055
1056  // Compute how many cycles it will be before this actually becomes
1057  // available.  This is the max of the start time of all predecessors plus
1058  // their latencies.
1059  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
1060
1061  // If all the node's predecessors are scheduled, this node is ready
1062  // to be scheduled. Ignore the special ExitSU node.
1063  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
1064    PendingQueue.push_back(SuccSU);
1065}
1066
1067/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
1068void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
1069  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1070       I != E; ++I)
1071    ReleaseSucc(SU, &*I);
1072}
1073
1074/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1075/// count of its successors. If a successor pending count is zero, add it to
1076/// the Available queue.
1077void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
1078  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
1079  DEBUG(SU->dump(this));
1080
1081  Sequence.push_back(SU);
1082  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
1083  SU->setDepthToAtLeast(CurCycle);
1084
1085  ReleaseSuccessors(SU);
1086  SU->isScheduled = true;
1087  AvailableQueue.ScheduledNode(SU);
1088}
1089
1090/// ListScheduleTopDown - The main loop of list scheduling for top-down
1091/// schedulers.
1092void SchedulePostRATDList::ListScheduleTopDown() {
1093  unsigned CurCycle = 0;
1094
1095  // Release any successors of the special Entry node.
1096  ReleaseSuccessors(&EntrySU);
1097
1098  // Add all leaves to the Available queue.
1099  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1100    // It is available if it has no predecessors.
1101    if (SUnits[i].Preds.empty()) {
1102      AvailableQueue.push(&SUnits[i]);
1103      SUnits[i].isAvailable = true;
1104    }
1105  }
1106
1107  // In any cycle where we can't schedule any instructions, we must
1108  // stall or emit a noop, depending on the target.
1109  bool CycleHasInsts = false;
1110
1111  // While Available queue is not empty, grab the node with the highest
1112  // priority. If it is not ready, put it back. Schedule the node.
1113  std::vector<SUnit*> NotReady;
1114  Sequence.reserve(SUnits.size());
1115  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
1116    // Check to see if any of the pending instructions are ready to issue.  If
1117    // so, add them to the available queue.
1118    unsigned MinDepth = ~0u;
1119    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
1120      if (PendingQueue[i]->getDepth() <= CurCycle) {
1121        AvailableQueue.push(PendingQueue[i]);
1122        PendingQueue[i]->isAvailable = true;
1123        PendingQueue[i] = PendingQueue.back();
1124        PendingQueue.pop_back();
1125        --i; --e;
1126      } else if (PendingQueue[i]->getDepth() < MinDepth)
1127        MinDepth = PendingQueue[i]->getDepth();
1128    }
1129
1130    DEBUG(errs() << "\n*** Examining Available\n";
1131          LatencyPriorityQueue q = AvailableQueue;
1132          while (!q.empty()) {
1133            SUnit *su = q.pop();
1134            errs() << "Height " << su->getHeight() << ": ";
1135            su->dump(this);
1136          });
1137
1138    SUnit *FoundSUnit = 0;
1139
1140    bool HasNoopHazards = false;
1141    while (!AvailableQueue.empty()) {
1142      SUnit *CurSUnit = AvailableQueue.pop();
1143
1144      ScheduleHazardRecognizer::HazardType HT =
1145        HazardRec->getHazardType(CurSUnit);
1146      if (HT == ScheduleHazardRecognizer::NoHazard) {
1147        FoundSUnit = CurSUnit;
1148        break;
1149      }
1150
1151      // Remember if this is a noop hazard.
1152      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
1153
1154      NotReady.push_back(CurSUnit);
1155    }
1156
1157    // Add the nodes that aren't ready back onto the available list.
1158    if (!NotReady.empty()) {
1159      AvailableQueue.push_all(NotReady);
1160      NotReady.clear();
1161    }
1162
1163    // If we found a node to schedule, do it now.
1164    if (FoundSUnit) {
1165      ScheduleNodeTopDown(FoundSUnit, CurCycle);
1166      HazardRec->EmitInstruction(FoundSUnit);
1167      CycleHasInsts = true;
1168
1169      // If we are using the target-specific hazards, then don't
1170      // advance the cycle time just because we schedule a node. If
1171      // the target allows it we can schedule multiple nodes in the
1172      // same cycle.
1173      if (!EnablePostRAHazardAvoidance) {
1174        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
1175          ++CurCycle;
1176      }
1177    } else {
1178      if (CycleHasInsts) {
1179        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
1180        HazardRec->AdvanceCycle();
1181      } else if (!HasNoopHazards) {
1182        // Otherwise, we have a pipeline stall, but no other problem,
1183        // just advance the current cycle and try again.
1184        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
1185        HazardRec->AdvanceCycle();
1186        ++NumStalls;
1187      } else {
1188        // Otherwise, we have no instructions to issue and we have instructions
1189        // that will fault if we don't do this right.  This is the case for
1190        // processors without pipeline interlocks and other cases.
1191        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
1192        HazardRec->EmitNoop();
1193        Sequence.push_back(0);   // NULL here means noop
1194        ++NumNoops;
1195      }
1196
1197      ++CurCycle;
1198      CycleHasInsts = false;
1199    }
1200  }
1201
1202#ifndef NDEBUG
1203  VerifySchedule(/*isBottomUp=*/false);
1204#endif
1205}
1206
1207//===----------------------------------------------------------------------===//
1208//                         Public Constructor Functions
1209//===----------------------------------------------------------------------===//
1210
1211FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
1212  return new PostRAScheduler(OptLevel);
1213}
1214
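// Illustrative only (not part of this revision): a target's code generation
// pipeline would typically add the pass after register allocation via the
// factory above; assuming a PassManagerBase named PM and a chosen
// optimization level, roughly:
//
//   if (OptLevel != CodeGenOpt::None)
//     PM.add(llvm::createPostRAScheduler(OptLevel));
//
// Whether scheduling actually runs is then decided per target, or forced
// with -post-RA-scheduler, as shown in runOnMachineFunction above.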