ScheduleDAGInstrs.cpp revision 6a9041e5ca9bdf2c1b586a7b2c6488374b39be74
//===---- ScheduleDAGInstrs.cpp - Implement the ScheduleDAGInstrs class ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements scheduling
// for a MachineBasicBlock's MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
using namespace llvm;

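// Pass a null DAG pointer to the base class; this scheduler operates directly
// on MachineInstrs rather than on SelectionDAG nodes.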
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineBasicBlock *bb,
                                     const TargetMachine &tm)
  : ScheduleDAG(0, bb, tm) {}

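// BuildSchedUnits - Create an SUnit for each instruction in the current
// block and add register (data, anti, output) and memory (chain) dependence
// edges between them.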
void ScheduleDAGInstrs::BuildSchedUnits() {
  SUnits.clear();
  SUnits.reserve(BB->size());
  int Cost = 1; // FIXME

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where defs and uses of each physical register are as we proceed.
  SUnit *Defs[TargetRegisterInfo::FirstVirtualRegister] = {};
  std::vector<SUnit *> Uses[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Remember where unknown loads are after the most recent unknown store
  // as we proceed.
  std::vector<SUnit *> PendingLoads;

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Terminators can perform control transfers, so we need to make sure that
  // all the work of the block is done before the terminator.
  SUnit *Terminator = 0;

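  // Because the walk below is bottom-up, any SUnit already recorded in the
  // state above belongs to an instruction that comes later in program order
  // than the one currently being visited, so the current SUnit is added as a
  // predecessor of those recorded units.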
  for (MachineBasicBlock::iterator MII = BB->end(), MIE = BB->begin();
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    SUnit *SU = NewSUnit(MI);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      SUnit *&Def = Defs[Reg];
      // Optionally add output and anti dependencies.
      if (Def && Def != SU)
        Def->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false,
                     /*PhyReg=*/Reg, Cost, /*isAntiDep=*/MO.isUse());
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        SUnit *&Def = Defs[*Alias];
        if (Def && Def != SU)
          Def->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false,
                       /*PhyReg=*/*Alias, Cost, /*isAntiDep=*/MO.isUse());
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        for (unsigned i = 0, e = UseList.size(); i != e; ++i)
          if (UseList[i] != SU)
            UseList[i]->addPred(SU, /*isCtrl=*/false, /*isArtificial=*/false,
                                /*PhysReg=*/Reg, Cost);
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i)
            if (UseList[i] != SU)
              UseList[i]->addPred(SU, /*isCtrl=*/false, /*isArtificial=*/false,
                                  /*PhysReg=*/*Alias, Cost);
        }

        UseList.clear();
        Def = SU;
      } else {
        UseList.push_back(SU);
      }
    }

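    // Memory dependencies are modeled as control (chain) edges. Barriers
    // (calls, returns, branches, and instructions with unmodeled side
    // effects) are ordered against all tracked memory operations. Loads and
    // stores with a single non-volatile memoperand on a known
    // PseudoSourceValue get precise per-location edges through MemDefs and
    // MemUses. Everything else is handled conservatively through Chain and
    // PendingLoads (invariant loads need no chain edges at all).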
    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    const TargetInstrDesc &TID = MI->getDesc();
    if (TID.isCall() || TID.isReturn() || TID.isBranch() ||
        TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() && !ChainTID.isReturn() && !ChainTID.isBranch() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (MI->hasOneMemOperand() &&
          MI->memoperands_begin()->getValue() &&
          !MI->memoperands_begin()->isVolatile() &&
          isa<PseudoSourceValue>(MI->memoperands_begin()->getValue())) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        const Value *V = MI->memoperands_begin()->getValue();
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
          J->second.clear();
        }
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (MI->hasOneMemOperand() &&
                 MI->memoperands_begin()->getValue() &&
                 !MI->memoperands_begin()->isVolatile() &&
                 isa<PseudoSourceValue>(MI->memoperands_begin()->getValue())) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        const Value *V = MI->memoperands_begin()->getValue();
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Just depend on the general chain.
        if (Chain)
          Chain->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
        PendingLoads.push_back(SU);
      }
    }

    // Add chain edges from the terminator to ensure that all the work of the
    // block is completed before any control transfers.
    if (Terminator && SU->Succs.empty())
      Terminator->addPred(SU, /*isCtrl=*/true, /*isArtificial=*/false);
    if (TID.isTerminator() || MI->isLabel())
      Terminator = SU;

    // Assign the Latency field of SU using target-provided information.
    ComputeLatency(SU);
  }
}

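// ComputeLatency - Assign the Latency field of the given SUnit using the
// target's instruction itinerary data.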
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. Each SUnit here corresponds to a single
  // MachineInstr, so use the latency of its scheduling class from the
  // instruction itinerary.
  SU->Latency =
    InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (!BB->empty())
    BB->remove(BB->begin());

  for (unsigned i = 0, e = Sequence.size(); i != e; ++i) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->push_back(SU->getInstr());
  }

  return BB;
}