PostRASchedulerList.cpp revision 2a3868849438a0a0ad4f9a50f2b94eb1639b554e
//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/Statistic.h"
#include <map>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable simple hazard-avoidance"),
                      cl::init(true), cl::Hidden);
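
// For reference, both knobs above are registered as hidden command-line
// options, so (assuming a stock llc build) they can be toggled when
// debugging, e.g.:
//
//   llc -break-anti-dependencies=false -avoid-hazards=false foo.bc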

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation).  Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 cast to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
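
    // To illustrate the encoding (a sketch with made-up indices): walking a
    // block bottom-up, if R's last use (its kill) is at index 7, then while
    // R is live KillIndices[R] == 7 and DefIndices[R] == ~0u. Upon reaching
    // R's def at index 3, the state flips: DefIndices[R] == 3 and
    // KillIndices[R] == ~0u. Exactly one of the two is ~0u at any time,
    // which the asserts in ScanInstruction below depend on.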

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };

  /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
  /// a coarse classification and attempts to keep instructions of a given
  /// class from being grouped too densely together.
  class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
    /// Class - A simple classification for SUnits.
    enum Class {
      Other, Load, Store
    };

    /// Window - The Class values of the most recently issued
    /// instructions.
    Class Window[8];

    /// getClass - Classify the given SUnit.
    Class getClass(const SUnit *SU) {
      const MachineInstr *MI = SU->getInstr();
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.mayLoad())
        return Load;
      if (TID.mayStore())
        return Store;
      return Other;
    }

    /// Step - Rotate the existing entries in Window and insert the
    /// given class value as the most recent.
    void Step(Class C) {
      std::copy(Window+1, array_endof(Window), Window);
      Window[array_lengthof(Window)-1] = C;
    }

  public:
    SimpleHazardRecognizer() : Window() {}

    virtual HazardType getHazardType(SUnit *SU) {
      Class C = getClass(SU);
      if (C == Other)
        return NoHazard;
      unsigned Score = 0;
      for (unsigned i = 0; i != array_lengthof(Window); ++i)
        if (Window[i] == C)
          Score += i + 1;
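      // More recent slots carry more weight, so a dense recent run trips
      // the threshold quickly. For example, loads in the five most recent
      // of the eight slots score 4+5+6+7+8 = 30, exceeding the 8*2 = 16
      // threshold below and reporting a hazard.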
      if (Score > array_lengthof(Window) * 2)
        return Hazard;
      return NoHazard;
    }

    virtual void EmitInstruction(SUnit *SU) {
      Step(getClass(SU));
    }

    virtual void AdvanceCycle() {
      Step(Other);
    }
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
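
// For example, on x86 the stack-pointer check above makes any instruction
// that writes ESP/RSP (pushes, pops, stack adjustments) a boundary, on top
// of the label and terminator cases.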

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
                                 new SimpleHazardRecognizer() :
                                 new ScheduleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
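
// To sketch the tie-break above with made-up numbers: given two predecessor
// edges, a data edge from a node at depth 3 with latency 2 and an anti edge
// from a node at depth 4 with latency 1, both total 5; the anti edge wins,
// steering the walk toward edges that BreakAntiDependencies can remove.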

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defined but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
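    // (A def of a sub-register only partially defines its super-registers;
    // on x86, for example, a def of AX leaves the upper bits of EAX alone,
    // so EAX can't safely be renamed through this point.)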
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // If the register wasn't previously live, this use is its kill, and the
    // register is now live.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max->getDepth() + Max->Latency) << "\n";

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free.  This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break.  To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
           RE = RC->allocation_order_end(MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
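        // As a made-up example of the check below: with bottom-up indices,
        // if AntiDepReg's kill is at index 7 and NewReg's most recent def
        // is at index 9, then 7 <= 9 means the renamed live range ends
        // before NewReg is next defined, so the two ranges don't overlap.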
        if (KillIndices[NewReg] == ~0u &&
            Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on "
               << TRI->getName(AntiDepReg)
               << " with " << RegRefs.count(AntiDepReg) << " references"
               << " using " << TRI->getName(NewReg) << "!\n";

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
             Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];
          assert(((KillIndices[NewReg] == ~0u) !=
                  (DefIndices[NewReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for NewReg!");

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = ~0u;
          assert(((KillIndices[AntiDepReg] == ~0u) !=
                  (DefIndices[AntiDepReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for AntiDepReg!");

          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  // Check for an over-release before decrementing: NumPredsLeft is
  // unsigned, so testing for a negative value after the decrement
  // could never fire.
  if (SuccSU->NumPredsLeft == 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available.  This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue.  If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
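        // Erase from PendingQueue by overwriting slot i with the last
        // element and shrinking, then re-examine index i.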
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    // If there are no instructions available, don't try to issue anything, and
    // don't advance the hazard recognizer.
    if (AvailableQueue.empty()) {
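      // Fast-forward to the earliest cycle at which a pending instruction
      // becomes ready, rather than stepping one cycle at a time.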
      CurCycle = MinDepth != ~0u ? MinDepth : CurCycle + 1;
      continue;
    }

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else if (!HasNoopHazards) {
      // Otherwise, we have a pipeline stall, but no other problem, just advance
      // the current cycle and try again.
      DOUT << "*** Advancing cycle, no work to do\n";
      HazardRec->AdvanceCycle();
      ++NumStalls;
      ++CurCycle;
    } else {
      // Otherwise, we have no instructions to issue and we have instructions
      // that will fault if we don't do this right.  This is the case for
      // processors without pipeline interlocks and other cases.
      DOUT << "*** Emitting noop\n";
      HazardRec->EmitNoop();
      Sequence.push_back(0);   // NULL here means noop
      ++NumNoops;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}
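
// Typical usage (a sketch; exact pass placement varies by target): a target
// machine adds this pass after register allocation and prologue/epilogue
// insertion, e.g.
//
//   PM.add(createPostRAScheduler());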
946