//===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a fast scheduler.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SchedulerRegistry.h"
#include "InstrEmitter.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical copies");

static RegisterScheduler
  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
                   createFastDAGScheduler);
static RegisterScheduler
  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                        createDAGLinearizer);

namespace {
  /// FastPriorityQueue - A degenerate priority queue that considers
  /// all nodes to have the same priority.
  ///
  struct FastPriorityQueue {
    SmallVector<SUnit *, 16> Queue;

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push_back(U);
    }

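    // Pop is LIFO: with no real priorities the queue degenerates to a stack,
    // so the most recently released node is scheduled first.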
    SUnit *pop() {
      if (empty()) return nullptr;
      SUnit *V = Queue.back();
      Queue.pop_back();
      return V;
    }
  };

//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
  /// AvailableQueue - The priority queue to use for the available SUnits.
  FastPriorityQueue AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other node
  /// that modifies the registers can be scheduled.
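  /// Both vectors are indexed by physical register number. LiveRegCycles
  /// records the cycle at which each register became live; it is consulted
  /// when a node is scheduled to decide when the register can be freed.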
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGFast(MachineFunction &mf)
    : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  /// AddPred - Adds a predecessor edge to SUnit SU.
  void AddPred(SUnit *SU, const SDep &D) {
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  void RemovePred(SUnit *SU, const SDep &D) {
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
  void ListScheduleBottomUp();

  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
  bool forceUnitLatencies() const override { return true; }
};
}  // end anonymous namespace

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
  DEBUG(dbgs() << "********** List Scheduling **********\n");

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue.push(PredSU);
  }
}

void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = nullptr;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  if (N->getGluedNode())
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
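  // A node producing glue cannot be duplicated: glue encodes a fixed pairing
  // between a value and its single user. A chain result (MVT::Other) hints at
  // a folded load that may be unfoldable instead.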
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
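    // Transfer uses: values of the original node are now produced by the
    // unfolded node N, and uses of the original chain result (its last value)
    // are redirected to the load's chain output.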
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    NewSU = newSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = newSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
    }

    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
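    // Partition SU's edges: the chain predecessor and predecessors that feed
    // the load belong to LoadSU; the remaining predecessors belong to the
    // unfolded node. Chain successors stay on the load, while data successors
    // move to the unfolded node.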
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPred = *I;
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad) {
        AddPred(LoadSU, Pred);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad) {
      SDep D(LoadSU, SDep::Barrier);
      D.setLatency(LoadSU->Latency);
      AddPred(NewSU, D);
    }

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
  NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;
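  // Together these form a cross-class round trip: CopyFromSU moves the value
  // out of the physical register into a DestRC register, and CopyToSU moves
  // it back into the original class.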

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    RemovePred(DelDeps[i].first, DelDeps[i].second);
  }
  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
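  // The node's results are laid out as the explicit defs followed by the
  // implicit defs, so start past the explicit defs and walk the implicit-def
  // list until Reg is found.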
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
    if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
      if (RegAdded.insert(*AI)) {
        LRegs.push_back(*AI);
        Added = true;
      }
    }
  }
  return Added;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
                                               SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

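      // Walk the inline asm operand groups: each group begins with a flag
      // word encoding the operand kind and the number of register operands
      // that follow it.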
      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }
    if (!Node->isMachineOpcode())
      continue;
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
    }
  }
  return !LRegs.empty();
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue.push(RootSU);
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Otherwise schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
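  // Each iteration pops candidates until one is found that does not interfere
  // with a live physical register; interfering candidates are parked in
  // NotReady and re-queued afterwards.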
  while (!AvailableQueue.empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue.pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue.pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try code duplication or inserting cross class copies to resolve it.
    if (Delayed && !CurSU) {
      // Try duplicating the nodes that produce these "expensive to copy"
      // values to break the dependency. If even that doesn't work, insert
      // cross class copies.
      SUnit *TrySU = NotReady[0];
      SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, VT);
      const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

      // If the cross copy register class is the same as RC, then it must be
      // possible to copy the value directly. Do not try to duplicate the def.
      // If the cross copy register class is not the same as RC, then it's
      // possible to copy the value, but it requires cross register class
      // copies and it is expensive.
      // If the cross copy register class is null, then it's not possible to
      // copy the value at all.
      SUnit *NewDef = nullptr;
      if (DestRC != RC) {
        NewDef = CopyAndMoveSuccessors(LRDef);
        if (!DestRC && !NewDef)
          report_fatal_error("Can't handle live physical "
                             "register dependency!");
      }
      if (!NewDef) {
        // Issue copies; these can be expensive cross register class copies.
        SmallVector<SUnit*, 2> Copies;
        InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
        DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
                     << " to SU #" << Copies.front()->NodeNum << "\n");
        AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
        NewDef = Copies.back();
      }

      DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
                   << " to SU #" << TrySU->NodeNum << "\n");
      LiveRegDefs[Reg] = NewDef;
      AddPred(NewDef, SDep(TrySU, SDep::Artificial));
      TrySU->isAvailable = false;
      CurSU = NewDef;

      if (!CurSU)
        llvm_unreachable(
            "Unable to resolve live physical register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue.push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

namespace {
//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - A no-op scheduler that simply linearizes the DAG in
// topological order.
// IMPORTANT: this may not work for targets with physical register
// dependencies.
//
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  MachineBasicBlock *
    EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;

private:
  std::vector<SDNode*> Sequence;
  DenseMap<SDNode*, SDNode*> GluedMap;  // Cache glue to its user

  void ScheduleNode(SDNode *N);
};
} // end anonymous namespace

void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
  if (N->getNodeId() != 0)
    llvm_unreachable(nullptr);

  if (!N->isMachineOpcode() &&
      (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
    // These nodes do not need to be translated into MIs.
    return;

  DEBUG(dbgs() << "\n*** Scheduling: ");
  DEBUG(N->dump(DAG));
  Sequence.push_back(N);

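  // Visit operands using NodeId as a remaining-use counter: an operand is
  // scheduled only once all of its users have been emitted. A glue operand is
  // scheduled immediately so it stays adjacent to N.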
  unsigned NumOps = N->getNumOperands();
  if (unsigned NumLeft = NumOps) {
    SDNode *GluedOpN = nullptr;
    do {
      const SDValue &Op = N->getOperand(NumLeft-1);
      SDNode *OpN = Op.getNode();

      if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
        // Schedule glue operand right above N.
        GluedOpN = OpN;
        assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
        OpN->setNodeId(0);
        ScheduleNode(OpN);
        continue;
      }

      if (OpN == GluedOpN)
        // Glue operand is already scheduled.
        continue;

      DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
      if (DI != GluedMap.end() && DI->second != N)
        // Users of glues are counted against the glued users.
        OpN = DI->second;

      unsigned Degree = OpN->getNodeId();
      assert(Degree > 0 && "Predecessor over-released!");
      OpN->setNodeId(--Degree);
      if (Degree == 0)
        ScheduleNode(OpN);
    } while (--NumLeft);
  }
}

/// findGluedUser - Find the representative use of a glue value by walking
/// the use chain.
static SDNode *findGluedUser(SDNode *N) {
  while (SDNode *Glued = N->getGluedUser())
    N = Glued;
  return N;
}

void ScheduleDAGLinearize::Schedule() {
  DEBUG(dbgs() << "********** DAG Linearization **********\n");

  SmallVector<SDNode*, 8> Glues;
  unsigned DAGSize = 0;
  for (SelectionDAG::allnodes_iterator I = DAG->allnodes_begin(),
         E = DAG->allnodes_end(); I != E; ++I) {
    SDNode *N = I;

    // Use node id to record degree.
    unsigned Degree = N->use_size();
    N->setNodeId(Degree);
    unsigned NumVals = N->getNumValues();
    if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
        N->hasAnyUseOfValue(NumVals-1)) {
      SDNode *User = findGluedUser(N);
      if (User) {
        Glues.push_back(N);
        GluedMap.insert(std::make_pair(N, User));
      }
    }

    if (N->isMachineOpcode() ||
        (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
      ++DAGSize;
  }

  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
    SDNode *Glue = Glues[i];
    SDNode *GUser = GluedMap[Glue];
    unsigned Degree = Glue->getNodeId();
    unsigned UDegree = GUser->getNodeId();

    // Glue user must be scheduled together with the glue operand. So other
    // users of the glue operand must be treated as its users.
    SDNode *ImmGUser = Glue->getGluedUser();
    for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
         ui != ue; ++ui)
      if (*ui == ImmGUser)
        --Degree;
    GUser->setNodeId(UDegree + Degree);
    Glue->setNodeId(1);
  }

  Sequence.reserve(DAGSize);
  ScheduleNode(DAG->getRoot().getNode());
}

MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;

  DEBUG({
      dbgs() << "\n*** Final schedule ***\n";
    });

  // FIXME: Handle dbg_values.
  unsigned NumNodes = Sequence.size();
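  // Sequence was built root-first (each node pushed before its operands), so
  // walk it in reverse to emit definitions before uses.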
  for (unsigned i = 0; i != NumNodes; ++i) {
    SDNode *N = Sequence[NumNodes-i-1];
    DEBUG(N->dump(DAG));
    Emitter.EmitNode(N, false, false, VRBaseMap);
  }

  DEBUG(dbgs() << '\n');

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGFast(*IS->MF);
}

llvm::ScheduleDAGSDNodes *
llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGLinearize(*IS->MF);
}