ScheduleDAGInstrs.cpp revision 4c5e43da7792f75567b693105cc53e3f1992ad98
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "misched"

static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));

static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
    cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));
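
// Both of these are cl::opt flags, so they can be toggled from the command
// line of any cl-aware driver, e.g. (illustrative invocation):
//   llc -enable-aa-sched-mi -use-tbaa-in-sched-mi=false ...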

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool IsPostRAFlag, bool RemoveKillFlags,
                                     LiveIntervals *lis)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), LIS(lis),
      IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
      CanHandleTerminators(false), FirstDbgValue(nullptr) {
  assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
  DbgValues.clear();
  assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
         "Virtual registers must be removed prior to PostRA scheduling");

  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(ST.getSchedModel(), &ST, TII);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
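///
/// For example (illustrative IR), starting from %a this walks the add back to
/// the ptrtoint and returns %obj as the underlying object:
///   %i = ptrtoint i8* %obj to i64
///   %a = add i64 %i, 16
///   %p = inttoptr i64 %a to i8*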
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
                                 SmallVectorImpl<Value *> &Objects,
                                 const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);

    for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
         I != IE; ++I) {
      V = *I;
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
}

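// An underlying object is either an IR Value or a target-specific
// PseudoSourceValue (e.g. a stack slot). The bool in each entry records
// whether the object may alias other tracked memory (the MayAlias flag
// consulted below when choosing between the Alias and NonAlias maps).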
typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
typedef SmallVector<PointerIntPair<ValueType, 1, bool>, 4>
UnderlyingObjectsVector;

/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo *MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  if (!MI->hasOneMemOperand() ||
      (!(*MI->memoperands_begin())->getValue() &&
       !(*MI->memoperands_begin())->getPseudoValue()) ||
      (*MI->memoperands_begin())->isVolatile())
    return;

  if (const PseudoSourceValue *PSV =
      (*MI->memoperands_begin())->getPseudoValue()) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (!PSV->isAliased(MFI)) {
      bool MayAlias = PSV->mayAlias(MFI);
      Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
    }
    return;
  }

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs, DL);

  for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
         I != IE; ++I) {
    V = *I;

    if (!isIdentifiedObject(V)) {
      Objects.clear();
      return;
    }

    Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
  }
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}

/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
/// region.
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

/// Close the current scheduling region. Don't clear any state in case the
/// driver wants to refer to the previous scheduling region.
void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}

/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
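///
/// Note: ExitSU is a placeholder node representing the end of the region;
/// edges into it model latency that must be covered before control leaves
/// the region (see also the live-out latency edges added in buildSchedGraph).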
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      else {
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
        if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(&ExitSU, i);
      }
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    assert(Uses.empty() && "Uses in set before adding deps?");
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (!Uses.contains(Reg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      }
  }
}

/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      Dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
                                         UseOp));

      ST.adjustSchedDependency(SU, UseSU, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
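  //
  // For example (illustrative pseudo-assembly):
  //   r1 = r2 + r3   ; use of r2
  //   r2 = ...       ; anti dependence on the use of r2 above (latency 0)
  //   r2 = ...       ; output dependence on the previous def of r2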
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
    if (RemoveKillFlags)
      MO.setIsKill(false);
  }
  else {
    addPhysRegDataDeps(SU, OperIdx);
    unsigned Reg = MO.getReg();

    // clear this register's use list
    if (Uses.contains(Reg))
      Uses.eraseAll(Reg);

    if (!MO.isDead()) {
      Defs.eraseAll(Reg);
    } else if (SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Singly defined vregs do not have output/anti dependencies.
  // The current operand is a def, so we have at least one.
  // Check here if there are any others...
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI == VRegDefs.end())
    VRegDefs.insert(VReg2SUnit(Reg, SU));
  else {
    SUnit *DefSU = DefI->SU;
    if (DefSU != SU && DefSU != &ExitSU) {
      SDep Dep(SU, SDep::Output, Reg);
      Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
      DefSU->addPred(Dep);
    }
    DefI->SU = SU;
  }
}

/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Record this local VReg use.
  VReg2UseMap::iterator UI = VRegUses.find(Reg);
  for (; UI != VRegUses.end(); ++UI) {
    if (UI->SU == SU)
      break;
  }
  if (UI == VRegUses.end())
    VRegUses.insert(VReg2SUnit(Reg, SU));

  // Lookup this operand's reaching definition.
  assert(LIS && "vreg dependencies requires LiveIntervals");
  LiveQueryResult LRQ
    = LIS->getInterval(Reg).Query(LIS->getInstructionIndex(MI));
  VNInfo *VNI = LRQ.valueIn();

  // VNI will be valid because MachineOperand::readsReg() is checked by caller.
  assert(VNI && "No value to read by operand");
  MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
  // Phis and other noninstructions (after coalescing) have a NULL Def.
  if (Def) {
    SUnit *DefSU = getSUnit(Def);
    if (DefSU) {
      // The reaching Def lives within this scheduling region.
      // Create a data dependence.
      SDep dep(DefSU, SDep::Data, Reg);
      // Adjust the dependence latency using operand def/use information, then
      // allow the target to perform its own adjustments.
      int DefOp = Def->findRegisterDefOperandIdx(Reg);
      dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx));

      const TargetSubtargetInfo &ST = MF.getSubtarget();
      ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
      SU->addPred(dep);
    }
  }

  // Add antidependence to the following def of the vreg it uses.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI != VRegDefs.end() && DefI->SU != SU)
    DefI->SU->addPred(SDep(SU, SDep::Anti, Reg));
}

/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
      (MI->hasOrderedMemoryRef() &&
       (!MI->mayLoad() || !MI->isInvariantLoad(AA))))
    return true;
  return false;
}

// This MI either has incomplete information or is known to be unsafe
// to reason about (e.g. a volatile object).
static inline bool isUnsafeMemoryObject(MachineInstr *MI,
                                        const MachineFrameInfo *MFI,
                                        const DataLayout &DL) {
  if (!MI || MI->memoperands_empty())
    return true;
  // We purposely do not check for hasOneMemOperand() here,
  // in the hope of triggering an assert downstream so that
  // the remaining implementation gets finished.
  if ((*MI->memoperands_begin())->isVolatile() ||
       MI->hasUnmodeledSideEffects())
    return true;

  if ((*MI->memoperands_begin())->getPseudoValue()) {
    // Similarly to getUnderlyingObjectsForInstr:
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    return true;
  }

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return true;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs, DL);
  for (SmallVectorImpl<Value *>::iterator I = Objs.begin(),
         IE = Objs.end(); I != IE; ++I) {
    // Does this pointer refer to a distinct and identifiable object?
    if (!isIdentifiedObject(*I))
      return true;
  }

  return false;
}

/// This returns true if the two MIs need a chain edge between them.
/// If these are not even memory operations, we still may need
/// chain deps between them. The question really is: could
/// these two MIs be reordered during scheduling from a memory dependency
/// point of view?
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                             const DataLayout &DL, MachineInstr *MIa,
                             MachineInstr *MIb) {
  const MachineFunction *MF = MIa->getParent()->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Cover a trivial case - no edge is needed from a node to itself.
  if (MIa == MIb)
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if ((MIa->mayLoad() || MIa->mayStore()) &&
      (MIb->mayLoad() || MIb->mayStore()))
    if (TII->areMemAccessesTriviallyDisjoint(MIa, MIb, AA))
      return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
    return true;

  if (isUnsafeMemoryObject(MIa, MFI, DL) || isUnsafeMemoryObject(MIb, MFI, DL))
    return true;

  // If we are dealing with two "normal" loads, we do not need an edge
  // between them - they could be reordered.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // To this point analysis is generic. From here on we do need AA.
  if (!AA)
    return true;

  MachineMemOperand *MMOa = *MIa->memoperands_begin();
  MachineMemOperand *MMOb = *MIb->memoperands_begin();

  if (!MMOa->getValue() || !MMOb->getValue())
    return true;

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // FIXME: Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
  assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");

  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
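  // Worked example (illustrative): MMOa at offset 8 with size 4 and MMOb at
  // offset 0 with size 16 give MinOffset = 0, Overlapa = 4 + 8 - 0 = 12 and
  // Overlapb = 16 + 0 - 0 = 16, so both locations are queried as extents
  // measured from the common minimum offset.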

  AliasAnalysis::AliasResult AAResult = AA->alias(
      AliasAnalysis::Location(MMOa->getValue(), Overlapa,
                              UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      AliasAnalysis::Location(MMOb->getValue(), Overlapb,
                              UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != AliasAnalysis::NoAlias);
}

/// This recursive function iterates over chain deps of SUb looking for
/// the "latest" node that needs a chain edge to SUa.
static unsigned iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                                 const DataLayout &DL, SUnit *SUa, SUnit *SUb,
                                 SUnit *ExitSU, unsigned *Depth,
                                 SmallPtrSetImpl<const SUnit *> &Visited) {
  if (!SUa || !SUb || SUb == ExitSU)
    return *Depth;

  // Remember visited nodes.
  if (!Visited.insert(SUb).second)
    return *Depth;
  // If there is _some_ dependency already in place, do not
  // descend any further.
  // TODO: Need to make sure that if that dependency got eliminated or ignored
  // for any reason in the future, we would not violate DAG topology.
  // Currently it does not happen, but makes an implicit assumption about
  // future implementation.
  //
  // Independently, if we encounter a node that is some sort of global
  // object (like a call) we already have a full set of dependencies on it
  // and we can stop descending.
  if (SUa->isSucc(SUb) ||
      isGlobalMemoryObject(AA, SUb->getInstr()))
    return *Depth;

  // If we do need an edge, or we have exceeded the depth budget,
  // add that edge to the predecessor chain of SUb,
  // and stop descending.
  if (*Depth > 200 ||
      MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
    SUb->addPred(SDep(SUa, SDep::MayAliasMem));
    return *Depth;
  }
  // Track current depth.
  (*Depth)++;
  // Iterate over memory dependencies only.
  for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
       I != E; ++I)
    if (I->isNormalMemoryOrBarrier())
      iterateChainSucc(AA, MFI, DL, SUa, I->getSUnit(), ExitSU, Depth, Visited);
  return *Depth;
}

/// This function assumes that a tail/leaf of the already constructed DAG
/// exists "downward" from SU. It iterates downward and checks whether SU
/// can alias any node dominated by it.
static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                            const DataLayout &DL, SUnit *SU, SUnit *ExitSU,
                            std::set<SUnit *> &CheckList,
                            unsigned LatencyToLoad) {
  if (!SU)
    return;

  SmallPtrSet<const SUnit*, 16> Visited;
  unsigned Depth = 0;

  for (std::set<SUnit *>::iterator I = CheckList.begin(), IE = CheckList.end();
       I != IE; ++I) {
    if (SU == *I)
      continue;
    if (MIsNeedChainEdge(AA, MFI, DL, SU->getInstr(), (*I)->getInstr())) {
      SDep Dep(SU, SDep::MayAliasMem);
      Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
      (*I)->addPred(Dep);
    }

    // Iterate recursively over all previously added memory chain
    // successors. Keep track of visited nodes.
    for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
         JE = (*I)->Succs.end(); J != JE; ++J)
      if (J->isNormalMemoryOrBarrier())
        iterateChainSucc(AA, MFI, DL, SU, J->getSUnit(), ExitSU, &Depth,
                         Visited);
  }
}

/// Check whether two objects need a chain edge; if so, add it;
/// otherwise, remember the rejected SU.
static inline void addChainDependency(AliasAnalysis *AA,
                                      const MachineFrameInfo *MFI,
                                      const DataLayout &DL, SUnit *SUa,
                                      SUnit *SUb, std::set<SUnit *> &RejectList,
                                      unsigned TrueMemOrderLatency = 0,
                                      bool isNormalMemory = false) {
  // If this is a false dependency,
  // do not add the edge, but remember the rejected node.
  if (MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
    SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
    Dep.setLatency(TrueMemOrderLatency);
    SUb->addPred(Dep);
  }
  else {
    // Duplicate entries should be ignored.
    RejectList.insert(SUb);
    DEBUG(dbgs() << "\tReject chain dep between SU("
          << SUa->NodeNum << ") and SU("
          << SUb->NodeNum << ")\n");
  }
}

/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = newSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (TargetSchedModel::ProcResIter
             PI = SchedModel.getWriteProcResBegin(SC),
             PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
        switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) {
        case 0:
          SU->hasReservedResource = true;
          break;
        case 1:
          SU->isUnbuffered = true;
          break;
        default:
          break;
        }
      }
    }
  }
}

/// If RegPressure is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs) {
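  // Honor -enable-aa-sched-mi when it is explicitly set on the command line;
  // otherwise fall back to the subtarget's default AA policy.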
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  AliasAnalysis *AAForDep = UseAA ? AA : nullptr;

  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = nullptr, *AliasChain = nullptr;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  MapVector<ValueType, std::vector<SUnit *> > AliasMemDefs, NonAliasMemDefs;
  MapVector<ValueType, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
  std::set<SUnit*> RejectMemNodes;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
  VRegUses.clear();
  VRegDefs.setUniverse(MRI.getNumVirtRegs());
  VRegUses.setUniverse(MRI.getNumVirtRegs());

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr *MI = std::prev(MII);
    if (MI && DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, MI));
      DbgMI = nullptr;
    }

    if (MI->isDebugValue()) {
      DbgMI = MI;
      continue;
    }
    SUnit *SU = MISUnitMap[MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : nullptr;
      RPTracker->recede(/*LiveUses=*/nullptr, PDiff);
      assert(RPTracker->getPos() == std::prev(MII) &&
             "RPTracker can't find MI");
    }

    assert(
        (CanHandleTerminators || (!MI->isTerminator() && !MI->isPosition())) &&
        "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef()) {
          HasVRegDef = true;
          addVRegDefDeps(SU, j);
        }
        else if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(SU, j);
      }
    }
    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1
        && (HasVRegDef || MI->mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
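    //
    // For example (illustrative): an aliased [store; load] pair keeps a chain
    // edge with latency 1 (the bypass estimate), while [store; store]
    // ordering edges get latency 0.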
    unsigned TrueMemOrderLatency = MI->mayStore() ? 1 : 0;
    if (isGlobalMemoryObject(AA, MI)) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
          I->second[i]->addPred(SDep(SU, SDep::Barrier));
        }
      }
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
          SDep Dep(SU, SDep::Barrier);
          Dep.setLatency(TrueMemOrderLatency);
          I->second[i]->addPred(Dep);
        }
      }
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));
      BarrierChain = SU;
      // This is a barrier event that acts as a pivotal node in the DAG,
      // so it is safe to clear the list of exposed nodes.
      adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      RejectMemNodes.clear();
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain) {
        unsigned ChainLatency = 0;
        if (AliasChain->getInstr()->mayLoad())
          ChainLatency = TrueMemOrderLatency;
        addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
                           RejectMemNodes, ChainLatency);
      }
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                           PendingLoads[k], RejectMemNodes,
                           TrueMemOrderLatency);
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
           AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                             I->second[i], RejectMemNodes);
      }
      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
           AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                             I->second[i], RejectMemNodes, TrueMemOrderLatency);
      }
      adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      // Add dependence on barrier chain, if needed.
      // There is no point in checking aliasing on a barrier event. Even if
      // SU and the barrier _could_ be reordered, they should not be. In
      // addition, we have lost all RejectMemNodes below the barrier.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));

      UnderlyingObjectsVector Objs;
      getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());

      if (Objs.empty()) {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      bool MayAlias = false;
      for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end();
           K != KE; ++K) {
        ValueType V = K->getPointer();
        bool ThisMayAlias = K->getInt();
        if (ThisMayAlias)
          MayAlias = true;

        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        MapVector<ValueType, std::vector<SUnit *> >::iterator I =
          ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
          ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          for (unsigned i = 0, e = I->second.size(); i != e; ++i)
            addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                               I->second[i], RejectMemNodes, 0, true);

          // If we're not using AA, then we only need one store per object.
          if (!AAForDep)
            I->second.clear();
          I->second.push_back(SU);
        } else {
          if (ThisMayAlias) {
            if (!AAForDep)
              AliasMemDefs[V].clear();
            AliasMemDefs[V].push_back(SU);
          } else {
            if (!AAForDep)
              NonAliasMemDefs[V].clear();
            NonAliasMemDefs[V].push_back(SU);
          }
        }
        // Handle the uses in MemUses, if there are any.
        MapVector<ValueType, std::vector<SUnit *> >::iterator J =
          ((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        MapVector<ValueType, std::vector<SUnit *> >::iterator JE =
          ((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                               J->second[i], RejectMemNodes,
                               TrueMemOrderLatency, true);
          J->second.clear();
        }
      }
      if (MayAlias) {
        // Add dependencies from all the PendingLoads, i.e. loads
        // with no underlying object.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                             PendingLoads[k], RejectMemNodes,
                             TrueMemOrderLatency);
        // Add dependence on alias chain, if needed.
        if (AliasChain)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
                             RejectMemNodes);
      }
      adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        UnderlyingObjectsVector Objs;
        getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());

        if (Objs.empty()) {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            for (unsigned i = 0, e = I->second.size(); i != e; ++i)
              addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                                 I->second[i], RejectMemNodes);

          PendingLoads.push_back(SU);
          MayAlias = true;
        } else {
          MayAlias = false;
        }

        for (UnderlyingObjectsVector::iterator
             J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
          ValueType V = J->getPointer();
          bool ThisMayAlias = J->getInt();

          if (ThisMayAlias)
            MayAlias = true;

          // A load from a specific PseudoSourceValue. Add precise dependencies.
          MapVector<ValueType, std::vector<SUnit *> >::iterator I =
            ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
            ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            for (unsigned i = 0, e = I->second.size(); i != e; ++i)
              addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
                                 I->second[i], RejectMemNodes, 0, true);
          if (ThisMayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        }
        if (MayAlias)
          adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU,
                          RejectMemNodes, /*Latency=*/0);
        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
                             RejectMemNodes);
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Barrier));
      }
    }
  }
  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  VRegDefs.clear();
  PendingLoads.clear();
}

/// \brief Initialize register live-range state for updating kills.
void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Examine the live-in regs of all successors.
  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
       SE = BB->succ_end(); SI != SE; ++SI) {
    for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
         E = (*SI)->livein_end(); I != E; ++I) {
      unsigned Reg = *I;
      // Repeat, for reg and all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  MachineInstrBuilder MIB(MF, MI);
  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
    if (LiveRegs.test(*SubRegs)) {
      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

// FIXME: Reuse the LivePhysRegs utility for this.
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  LiveRegs.resize(TRI->getNumRegs());
  BitVector killedRegs(TRI->getNumRegs());

  startBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness.  Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      // Repeat for reg and all subregs.
      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.reset(*SubRegs);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use. Don't set kill flags on undef operands.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
          if (LiveRegs.test(*SubRegs)) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: toggleKillFlag may invalidate MO.
        toggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || MRI.isReserved(Reg)) continue;

      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        LiveRegs.set(*SubRegs);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  SU->getInstr()->dump();
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// may contain multiple scheduling regions, but it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

namespace llvm {
/// \brief Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;  // Parent node (member of the parent subtree).
    unsigned SubInstrCount; // Instr count in this tree only, not children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
                           SubInstrCount(0) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Return true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }
1276
1277  /// Initialize this node's instruction count. We don't need to flag the node
1278  /// visited until visitPostorder because the DAG cannot have cycles.
1279  void visitPreorder(const SUnit *SU) {
1280    R.DFSNodeData[SU->NodeNum].InstrCount =
1281      SU->getInstr()->isTransient() ? 0 : 1;
1282  }
1283
1284  /// Called once for each node after all predecessors are visited. Revisit this
1285  /// node's predecessors and potentially join them now that we know the ILP of
1286  /// the other predecessors.
1287  void visitPostorderNode(const SUnit *SU) {
1288    // Mark this node as the root of a subtree. It may be joined with its
1289    // successors later.
1290    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
1291    RootData RData(SU->NodeNum);
1292    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;
1293
1294    // If any predecessors are still in their own subtree, they either cannot be
1295    // joined or are large enough to remain separate. If this parent node's
1296    // total instruction count is not greater than a child subtree by at least
1297    // the subtree limit, then try to join it now since splitting subtrees is
1298    // only useful if multiple high-pressure paths are possible.
1299    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
1300    for (SUnit::const_pred_iterator
1301           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1302      if (PI->getKind() != SDep::Data)
1303        continue;
1304      unsigned PredNum = PI->getSUnit()->NodeNum;
1305      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
1306        joinPredSubtree(*PI, SU, /*CheckLimit=*/false);
1307
1308      // Either link or merge the TreeData entry from the child to the parent.
1309      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
1310        // If the predecessor's parent is invalid, this is a tree edge and the
1311        // current node is the parent.
1312        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
1313          RootSet[PredNum].ParentNodeID = SU->NodeNum;
1314      }
1315      else if (RootSet.count(PredNum)) {
1316        // The predecessor is not a root, but is still in the root set. This
1317        // must be the new parent that it was just joined to. Note that
1318        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
1319        // set to the original parent.
1320        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
1321        RootSet.erase(PredNum);
1322      }
1323    }
1324    RootSet[SU->NodeNum] = RData;
1325  }
1326
1327  /// Called once for each tree edge after calling visitPostOrderNode on the
1328  /// predecessor. Increment the parent node's instruction count and
1329  /// preemptively join this subtree to its parent's if it is small enough.
1330  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
1331    R.DFSNodeData[Succ->NodeNum].InstrCount
1332      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
1333    joinPredSubtree(PredDep, Succ);
1334  }
1335
1336  /// Add a connection for cross edges.
1337  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
1338    ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
1339  }

  /// Set each node's subtree ID to the representative ID and record connections
  /// between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (SparseSet<RootData>::const_iterator
           RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) {
      unsigned TreeID = SubtreeClasses[RI->NodeID];
      if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
            << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (std::vector<std::pair<const SUnit*, const SUnit*> >::const_iterator
           I = ConnectionPairs.begin(), E = ConnectionPairs.end();
         I != E; ++I) {
      unsigned PredTree = SubtreeClasses[I->first->NodeNum];
      unsigned SuccTree = SubtreeClasses[I->second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = I->first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }

protected:
  /// Join the predecessor subtree with the successor that is its DFS
  /// parent. Apply some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // pinch point.
    unsigned NumDataSucs = 0;
    for (SUnit::const_succ_iterator SI = PredSU->Succs.begin(),
           SE = PredSU->Succs.end(); SI != SE; ++SI) {
      if (SI->getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }
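
  // For example (illustrative): a value that fans out to four or more
  // independent data successors is treated as a pinch point; joining it into
  // any one successor's subtree would tie the other consumers to that subtree,
  // so it is kept as its own subtree root instead.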

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
        R.SubtreeConnections[FromTree];
      for (SmallVectorImpl<SchedDFSResult::Connection>::iterator
             I = Connections.begin(), E = Connections.end(); I != E; ++I) {
        if (I->TreeID == ToTree) {
          I->Level = std::max(I->Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
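
  // Sketch of the propagation above (hypothetical tree IDs): recording a
  // connection T2 -> T5 at depth 3 when T2's ParentTreeID is T1 also records
  // T1 -> T5 at depth 3, and so on up the ancestor chain, keeping only the
  // deepest Level seen per destination tree.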
};
} // namespace llvm

namespace {
/// \brief Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};
} // anonymous

static bool hasDataSucc(const SUnit *SU) {
  for (SUnit::const_succ_iterator
         SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) {
    if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (ArrayRef<SUnit>::const_iterator
         SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) {
    const SUnit *SU = &*SI;
    if (Impl.isVisited(SU) || hasDataSucc(SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(SU);
    DFS.follow(SU);
    for (;;) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}
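
// Typical driver (a sketch; a scheduler owning a SchedDFSResult named Result
// might run):
//   Result.resize(SUnits.size());
//   Result.compute(SUnits);
//   unsigned Tree = Result.getSubtreeID(SU);   // subtree containing SU
//   ILPValue ILP  = Result.getILP(SU);         // per-node ILP metric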

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (SmallVectorImpl<Connection>::const_iterator
         I = SubtreeConnections[SubtreeID].begin(),
         E = SubtreeConnections[SubtreeID].end(); I != E; ++I) {
    SubtreeConnectLevels[I->TreeID] =
      std::max(SubtreeConnectLevels[I->TreeID], I->Level);
    DEBUG(dbgs() << "  Tree: " << I->TreeID
          << " @" << SubtreeConnectLevels[I->TreeID] << '\n');
  }
}
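
// A priority function can consume the levels recorded above. This is only a
// sketch (Result is an assumed reference to a populated SchedDFSResult held by
// a hypothetical comparator) of how a node order might break ties between
// subtrees, preferring the more deeply connected one:
//   unsigned TreeA = Result.getSubtreeID(A), TreeB = Result.getSubtreeID(B);
//   if (TreeA != TreeB)
//     return Result.getSubtreeLevel(TreeA) < Result.getSubtreeLevel(TreeB);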

LLVM_DUMP_METHOD
void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}
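
// For instance (illustrative values): InstrCount == 6 with Length == 3 prints
// "6 / 3 = 2", while a zero Length prints "6 / 0 = BADILP" to flag a
// degenerate critical-path length.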

LLVM_DUMP_METHOD
void ILPValue::dump() const {
  dbgs() << *this << '\n';
}

namespace llvm {

LLVM_DUMP_METHOD
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // namespace llvm
