ScheduleDAGSDNodes.cpp revision fd9760502d0fc87ac49dac834660a657f6b78370
//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, the base class used by the
// SelectionDAG-based scheduling implementations.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
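  // CopyToReg's operands are (Chain, Reg, Value), so only the value operand
  // (operand 2) can carry a physical register dependency from Def to User.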
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

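/// AddFlags - Rebuild N's result and operand lists in place, optionally
/// appending an MVT::Flag result and/or an incoming flag operand. This is
/// the mechanism used below to tie clustered loads together.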
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));
  if (AddFlag)
    VTs.push_back(MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));
  if (Flag.getNode())
    Ops.push_back(Flag);
  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads from the same base pointer at different offsets.
/// If the offsets are not far apart (target specific), it adds MVT::Flag
/// inputs and outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (!TID.mayLoad())
      continue;

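    // The incoming chain, when present, is expected to be the last operand
    // here; loads with no chain operand in that position are skipped.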
    SDNode *Chain = 0;
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
      Chain = Node->getOperand(NumOps-1).getNode();
    if (!Chain)
      continue;

    // Look for other loads of the same chain. Find loads that are loading from
    // the same base pointer and different offsets.
    Visited.clear();
    Offsets.clear();
    O2SMap.clear();
    bool Cluster = false;
    SDNode *Base = Node;
    int64_t BaseOffset;
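    // Base tracks the load with the smallest offset seen so far; it becomes
    // the head of the cluster.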
    for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
         I != E; ++I) {
      SDNode *User = *I;
      if (User == Node || !Visited.insert(User))
        continue;
      int64_t Offset1, Offset2;
      if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
          Offset1 == Offset2)
        // FIXME: Should be ok if their addresses are identical. But earlier
        // optimizations really should have eliminated one of the loads.
        continue;
      if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
        Offsets.push_back(Offset1);
      O2SMap.insert(std::make_pair(Offset2, User));
      Offsets.push_back(Offset2);
      if (Offset2 < Offset1) {
        Base = User;
        BaseOffset = Offset2;
      } else {
        BaseOffset = Offset1;
      }
      Cluster = true;
    }

    if (!Cluster)
      continue;

    // Sort them in increasing order.
    std::sort(Offsets.begin(), Offsets.end());

    // Check if the loads are close enough.
    SmallVector<SDNode*, 4> Loads;
    unsigned NumLoads = 0;
    int64_t BaseOff = Offsets[0];
    SDNode *BaseLoad = O2SMap[BaseOff];
    Loads.push_back(BaseLoad);
    for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
      int64_t Offset = Offsets[i];
      SDNode *Load = O2SMap[Offset];
      if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                        NumLoads))
        break; // Stop right here. Ignore loads that are further away.
      Loads.push_back(Load);
      ++NumLoads;
    }

    if (NumLoads == 0)
      continue;

    // Cluster loads by adding MVT::Flag outputs and inputs. This also
    // ensures they are scheduled in order of increasing addresses.
    SDNode *Lead = Loads[0];
    AddFlags(Lead, SDValue(0,0), true, DAG);
    SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
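    // Each load consumes the flag produced by the previous one, forming a
    // chain Lead -> Loads[1] -> ... that the scheduler cannot reorder.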
    for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
      bool OutFlag = i < e-1;
      SDNode *Load = Loads[i];
      AddFlags(Load, InFlag, OutFlag, DAG);
      if (OutFlag)
        InFlag = SDValue(Load, Load->getNumValues()-1);
      ++LoadsClustered;
    }
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that the vector won't be reallocated, so SUnit pointers won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add those nodes to the
    // flagged nodes.  Nodes can have at most one flag input and one flag
    // output.  Flags are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    if (UnitLatencies)
      NodeSUnit->Latency = 1;
    else
      ComputeLatency(NodeSUnit);
  }
}

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
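      // An operand tied to an earlier def (TIED_TO) marks this as a
      // two-address instruction.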
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
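        // Results beyond the explicit defs correspond to implicit physical
        // register defs; if any of those are actually used, note it.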
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross-class copy (cost < 0). That
        // means we are only treating an "expensive to copy" register
        // dependency as a physical register dependency. This may change in
        // the future though.
        if (Cost >= 0)
          PhysReg = 0;

        const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpSU->Latency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpSU, SU, (SDep &)dep);
          ST.adjustSchedDependency(OpSU, SU, (SDep &)dep);
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection DAG we are
/// given as input.  This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// nodes flagged together with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster loads from "near" addresses into combined SUnits.
  ClusterNeighboringLoads();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.  We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

namespace {
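  // Sorts (source order, MachineInstr) pairs by ascending source order
  // number.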
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                           InstrEmitter &Emitter,
                           DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM,
                           DenseMap<SDValue, unsigned> &VRBaseMap,
                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                           SmallSet<unsigned, 8> &Seen) {
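  // Ignore nodes that have no source order number and order numbers that
  // have already been recorded.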
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order))
    return;

  MachineBasicBlock *BB = Emitter.getBlock();
  if (BB->empty() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, &BB->back()));
  if (!N->getHasDebugValue())
    return;
  // Opportunistically insert immediate dbg_value uses, i.e. those with a
  // source order number right after N's.
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (DVOrder == ++Order) {
      // FIXME: If the source node with the next higher order is scheduled
      // before this one, this could end up generating funky debug info.
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], BB, VRBaseMap, EM);
      Orders.push_back(std::make_pair(DVOrder, DbgMI));
      BB->insert(InsertPos, DbgMI);
      DVs[i]->setIsInvalidated();
    }
  }
}


/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
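  // Orders pairs each emitted node's source order number with the last
  // machine instruction emitted for it; Seen keeps an order number from
  // being recorded more than once.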
  bool HasDbg = DAG->hasDebugValues();

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      SDNode *N = FlaggedNodes.back();
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap, EM);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, EM, VRBaseMap, Orders, Seen);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap, EM);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, EM, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values that have not already been inserted, in source
  // order.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->empty() ? BB->end() : BB->begin();
    while (BBBegin != BB->end() && BBBegin->isPHI())
      ++BBBegin;
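    // BBBegin now points at the first non-PHI instruction; leading dbg_values
    // are inserted there.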

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    MachineInstr *LastMI = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      MachineBasicBlock *MIBB = MI->getParent();
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, MIBB, VRBaseMap, EM);
        if (!LastOrder)
          // Insert to start of the BB (after PHIs).
          BB->insert(BBBegin, DbgMI);
        else {
          MachineBasicBlock::iterator Pos = MI;
          MIBB->insert(llvm::next(Pos), DbgMI);
        }
      }
      LastOrder = Order;
      LastMI = MI;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, InsertBB, VRBaseMap, EM);
        InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}