PostRASchedulerList.cpp revision c1ae8c9b8f426b74215abf0f7e46bffecc6f52d9
//===----- PostRASchedulerList.cpp - list scheduler ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                       cl::desc("Enable scheduling after register allocation"),
                       cl::init(false), cl::Hidden);
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
                      cl::desc("Debug control MBBs that are scheduled"),
                      cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
                      cl::desc("Debug control MBBs that are scheduled"),
                      cl::init(0), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation).  Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 cast to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
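    // Note: KillIndices and DefIndices are kept consistent so that, for each
    // register, exactly one of KillIndices[Reg] and DefIndices[Reg] is ~0u:
    // a register is either live (it has a kill index) or dead (it has a def
    // index), never both. The asserts below rely on this invariant.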

    /// KeepRegs - A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    SmallSet<unsigned, 4> KeepRegs;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR), AA(aa) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel))
      return false;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
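  // Note: the "exact" recognizer models structural hazards using the target's
  // instruction itineraries; the "simple" recognizer is a lighter-weight
  // stand-in used when exact hazard avoidance is disabled (see the respective
  // headers for the details of each).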
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
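    // Walk the block bottom-up, scheduling each region of instructions that
    // lies between scheduling boundaries. When a boundary is found, the
    // region below it (up to Current) is scheduled and emitted; the boundary
    // instruction itself is never reordered; it is only Observe()d so that
    // the liveness state stays correct.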
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());
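  // A register that is not live has KillIndices[Reg] == ~0u; seeding
  // DefIndices with BB->size() records a conceptual def just past the end of
  // the block for every register.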

  // Clear "do not change" set.
  KeepRegs.clear();

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In a non-return block this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

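  // MI itself will not be rescheduled, but its defs and uses still affect
  // liveness; scan it so the anti-dependence renaming state stays accurate.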
  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // It's not safe to change register allocation for source operands of
    // instructions that have special allocation requirements.
    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
      if (KeepRegs.insert(Reg)) {
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KeepRegs.insert(*Subreg);
      }
    }
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    KeepRegs.erase(Reg);
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      KeepRegs.erase(SubregReg);
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) !=
            (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
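/// For example (using illustrative register names, not any particular
/// target's), in
///   r1 = r0 + 1     <- last use of r0's old value
///   r0 = r2 + r3    <- r0 is redefined
/// the second instruction must stay below the first only because it reuses
/// r0 (an anti-dependence); renaming its def of r0, and the references to
/// that def, to a free register removes the edge.
///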
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (KillIndices[Reg] == ~0u)
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
#endif

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free.  This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break.  To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
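  // LastNewReg[R] is the register most recently used to rename an
  // anti-dependence on R; an entry of 0 means R has not been renamed yet.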

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.count(AntiDepReg))
            // Don't break anti-dependencies if a use down below requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirements, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);
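  // Kill flags are not tracked for reserved registers (e.g. the stack
  // pointer); uses of them are simply skipped below.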

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness.  Registers that are defed but not used in this
    // instruction are now dead. Mark the register and all of its subregs
    // as dead, since the register is completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it
        // became live in this instruction (proceeding bottom-up).
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not marked undef) and its subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available.  This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back; otherwise schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue.  If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
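        // The back() element was swapped into slot i; back up the index and
        // the bound so the swapped-in element is examined as well.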
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
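    // Pop nodes off the available queue until one is found that can issue
    // without a structural hazard; nodes that would stall are set aside in
    // NotReady and pushed back onto the queue afterwards.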
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem;
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right.  This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}