//===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion on machine instructions. We
// attempt to remove as much code from the body of a loop as possible.
//
// This pass is not intended to be a replacement or a complete alternative
// for the LLVM-IR-level LICM pass. It is only designed to hoist simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "machine-licm"

static cl::opt<bool>
AvoidSpeculation("avoid-speculation",
                 cl::desc("MachineLICM should avoid speculation"),
                 cl::init(true), cl::Hidden);

static cl::opt<bool>
HoistCheapInsts("hoist-cheap-insts",
                cl::desc("MachineLICM should hoist even cheap instructions"),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
SinkInstsToAvoidSpills("sink-insts-to-avoid-spills",
                       cl::desc("MachineLICM should sink instructions into "
                                "loops to avoid register spills"),
                       cl::init(false), cl::Hidden);

STATISTIC(NumHoisted,
          "Number of machine instructions hoisted out of loops");
STATISTIC(NumLowRP,
          "Number of instructions hoisted in low reg pressure situation");
STATISTIC(NumHighLatency,
          "Number of high latency instructions hoisted");
STATISTIC(NumCSEed,
          "Number of hoisted machine instructions CSEed");
STATISTIC(NumPostRAHoisted,
          "Number of machine instructions hoisted out of loops post regalloc");

namespace {
  class MachineLICM : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetLoweringBase *TLI;
    const TargetRegisterInfo *TRI;
    const MachineFrameInfo *MFI;
    MachineRegisterInfo *MRI;
    TargetSchedModel SchedModel;
    bool PreRegAlloc;

    // Various analyses that we use...
    AliasAnalysis        *AA;      // Alias analysis info.
    MachineLoopInfo      *MLI;     // Current MachineLoopInfo
    MachineDominatorTree *DT;      // Machine dominator tree for the cur loop

    // State that is updated as we process loops
    bool         Changed;          // True if a loop is changed.
    bool         FirstInLoop;      // True if it's the first LICM in the loop.
    MachineLoop *CurLoop;          // The current loop we are working on.
    MachineBasicBlock *CurPreheader; // The preheader for CurLoop.

    // Exit blocks for CurLoop.
    SmallVector<MachineBasicBlock*, 8> ExitBlocks;

    bool isExitBlock(const MachineBasicBlock *MBB) const {
      return std::find(ExitBlocks.begin(), ExitBlocks.end(), MBB) !=
        ExitBlocks.end();
    }

    // Track 'estimated' register pressure.
    SmallSet<unsigned, 32> RegSeen;
    SmallVector<unsigned, 8> RegPressure;

    // Register pressure "limit" per register pressure set. If the pressure
    // is higher than the limit, then it's considered high.
    SmallVector<unsigned, 8> RegLimit;

    // Register pressure on path leading from loop preheader to current BB.
    SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;

    // For each opcode, keep a list of potential CSE instructions.
    DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;

    enum {
      SpeculateFalse   = 0,
      SpeculateTrue    = 1,
      SpeculateUnknown = 2
    };

    // If an MBB does not dominate all loop exiting blocks, then it may not be
    // safe to hoist loads from this block.
    // Tri-state: 0 - false, 1 - true, 2 - unknown
    unsigned SpeculationState;

  public:
    static char ID; // Pass identification, replacement for typeid
    MachineLICM() :
      MachineFunctionPass(ID), PreRegAlloc(true) {
        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
      }

    explicit MachineLICM(bool PreRA) :
      MachineFunctionPass(ID), PreRegAlloc(PreRA) {
        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
      }

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addPreserved<MachineLoopInfo>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    void releaseMemory() override {
      RegSeen.clear();
      RegPressure.clear();
      RegLimit.clear();
      BackTrace.clear();
      CSEMap.clear();
    }

  private:
    /// Keep track of information about hoisting candidates.
    struct CandidateInfo {
      MachineInstr *MI;
      unsigned      Def;
      int           FI;
      CandidateInfo(MachineInstr *mi, unsigned def, int fi)
        : MI(mi), Def(def), FI(fi) {}
    };

    void HoistRegionPostRA();

    void HoistPostRA(MachineInstr *MI, unsigned Def);

    void ProcessMI(MachineInstr *MI, BitVector &PhysRegDefs,
                   BitVector &PhysRegClobbers, SmallSet<int, 32> &StoredFIs,
                   SmallVectorImpl<CandidateInfo> &Candidates);

    void AddToLiveIns(unsigned Reg);

    bool IsLICMCandidate(MachineInstr &I);

    bool IsLoopInvariantInst(MachineInstr &I);

    bool HasLoopPHIUse(const MachineInstr *MI) const;

    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
                               unsigned Reg) const;

    bool IsCheapInstruction(MachineInstr &MI) const;

    bool CanCauseHighRegPressure(const DenseMap<unsigned, int> &Cost,
                                 bool Cheap);

    void UpdateBackTraceRegPressure(const MachineInstr *MI);

    bool IsProfitableToHoist(MachineInstr &MI);

    bool IsGuaranteedToExecute(MachineBasicBlock *BB);

    void EnterScope(MachineBasicBlock *MBB);

    void ExitScope(MachineBasicBlock *MBB);

    void ExitScopeIfDone(
        MachineDomTreeNode *Node,
        DenseMap<MachineDomTreeNode *, unsigned> &OpenChildren,
        DenseMap<MachineDomTreeNode *, MachineDomTreeNode *> &ParentMap);

    void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);

    void HoistRegion(MachineDomTreeNode *N, bool IsHeader);

    void SinkIntoLoop();

    void InitRegPressure(MachineBasicBlock *BB);

    DenseMap<unsigned, int> calcRegisterCost(const MachineInstr *MI,
                                             bool ConsiderSeen,
                                             bool ConsiderUnseenAsDef);

    void UpdateRegPressure(const MachineInstr *MI,
                           bool ConsiderUnseenAsDef = false);

    MachineInstr *ExtractHoistableLoad(MachineInstr *MI);

    const MachineInstr *
    LookForDuplicate(const MachineInstr *MI,
                     std::vector<const MachineInstr *> &PrevMIs);

    bool EliminateCSE(
        MachineInstr *MI,
        DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator &CI);

    bool MayCSE(MachineInstr *MI);

    bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);

    void InitCSEMap(MachineBasicBlock *BB);

    MachineBasicBlock *getCurPreheader();
  };
} // end anonymous namespace

char MachineLICM::ID = 0;
char &llvm::MachineLICMID = MachineLICM::ID;
INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
                "Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineLICM, "machinelicm",
                "Machine Loop Invariant Code Motion", false, false)

/// Test if the given loop is the outer-most loop that has a unique predecessor.
static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
  // Check whether this loop even has a unique predecessor.
  if (!CurLoop->getLoopPredecessor())
    return false;
  // Ok, now check to see if any of its outer loops do.
  for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
    if (L->getLoopPredecessor())
      return false;
  // None of them did, so this is the outermost with a unique predecessor.
  return true;
}

bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  Changed = FirstInLoop = false;
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  TII = ST.getInstrInfo();
  TLI = ST.getTargetLowering();
  TRI = ST.getRegisterInfo();
  MFI = MF.getFrameInfo();
  MRI = &MF.getRegInfo();
  SchedModel.init(ST.getSchedModel(), &ST, TII);

  PreRegAlloc = MRI->isSSA();

  if (PreRegAlloc)
    DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
  else
    DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
  DEBUG(dbgs() << MF.getName() << " ********\n");

  if (PreRegAlloc) {
    // Estimate register pressure during pre-regalloc pass.
    unsigned NumRPS = TRI->getNumRegPressureSets();
    RegPressure.resize(NumRPS);
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
    RegLimit.resize(NumRPS);
    for (unsigned i = 0, e = NumRPS; i != e; ++i)
      RegLimit[i] = TRI->getRegPressureSetLimit(MF, i);
  }

  // Get our Loop information...
  MLI = &getAnalysis<MachineLoopInfo>();
  DT  = &getAnalysis<MachineDominatorTree>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
  while (!Worklist.empty()) {
    CurLoop = Worklist.pop_back_val();
    CurPreheader = nullptr;
    ExitBlocks.clear();

    // If this is done before regalloc, only visit outer-most preheader-sporting
    // loops.
    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
      Worklist.append(CurLoop->begin(), CurLoop->end());
      continue;
    }

    CurLoop->getExitBlocks(ExitBlocks);

    if (!PreRegAlloc)
      HoistRegionPostRA();
    else {
      // CSEMap is initialized for loop header when the first instruction is
      // being hoisted.
      MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
      FirstInLoop = true;
      HoistOutOfLoop(N);
      CSEMap.clear();

      if (SinkInstsToAvoidSpills)
        SinkIntoLoop();
    }
  }

  return Changed;
}

/// Return true if instruction stores to the specified frame.
static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
  // If we lost memory operands, conservatively assume that the instruction
  // writes to all slots.
  if (MI->memoperands_empty())
    return true;
  for (const MachineMemOperand *MemOp : MI->memoperands()) {
    if (!MemOp->isStore() || !MemOp->getPseudoValue())
      continue;
    if (const FixedStackPseudoSourceValue *Value =
        dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue())) {
      if (Value->getFrameIndex() == FI)
        return true;
    }
  }
  return false;
}

/// Examine the instruction as a potential LICM candidate. Also
/// gather register def and frame object update information.
void MachineLICM::ProcessMI(MachineInstr *MI,
                            BitVector &PhysRegDefs,
                            BitVector &PhysRegClobbers,
                            SmallSet<int, 32> &StoredFIs,
                            SmallVectorImpl<CandidateInfo> &Candidates) {
  bool RuledOut = false;
  bool HasNonInvariantUse = false;
  unsigned Def = 0;
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isFI()) {
      // Remember if the instruction stores to the frame index.
      int FI = MO.getIndex();
      if (!StoredFIs.count(FI) &&
          MFI->isSpillSlotObjectIndex(FI) &&
          InstructionStoresToFI(MI, FI))
        StoredFIs.insert(FI);
      HasNonInvariantUse = true;
      continue;
    }

    // We can't hoist an instruction defining a physreg that is clobbered in
    // the loop.
    if (MO.isRegMask()) {
      PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
      continue;
    }

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
           "Not expecting virtual register!");

    if (!MO.isDef()) {
      if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
        // If it's using a non-loop-invariant register, then it's obviously not
        // safe to hoist.
        HasNonInvariantUse = true;
      continue;
    }

    if (MO.isImplicit()) {
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        PhysRegClobbers.set(*AI);
      if (!MO.isDead())
        // Non-dead implicit def? This cannot be hoisted.
        RuledOut = true;
      // No need to check if a dead implicit def is also defined by
      // another instruction.
      continue;
    }

    // FIXME: For now, avoid instructions with multiple defs, unless
    // it's a dead implicit def.
    if (Def)
      RuledOut = true;
    else
      Def = Reg;

    // If we have already seen another instruction that defines the same
    // register, then this is not safe.  Two defs are indicated by setting a
    // PhysRegClobbers bit.
    for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS) {
      if (PhysRegDefs.test(*AS))
        PhysRegClobbers.set(*AS);
      PhysRegDefs.set(*AS);
    }
    if (PhysRegClobbers.test(Reg))
      // The register defined by MI is also defined by another instruction in
      // the loop, so MI cannot be a LICM candidate.
      RuledOut = true;
  }

  // For now, only consider reloads and remats which do not have register
  // operands. FIXME: Consider unfolding load-folding instructions.
  if (Def && !RuledOut) {
    int FI = INT_MIN;
    if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
        (TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
      Candidates.push_back(CandidateInfo(MI, Def, FI));
  }
}

/// Walk the specified region of the CFG and hoist loop invariants out to the
/// preheader.
void MachineLICM::HoistRegionPostRA() {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  unsigned NumRegs = TRI->getNumRegs();
  BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
  BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.

  SmallVector<CandidateInfo, 32> Candidates;
  SmallSet<int, 32> StoredFIs;

  // Walk the entire region, count number of defs for each register, and
  // collect potential LICM candidates.
  const std::vector<MachineBasicBlock *> &Blocks = CurLoop->getBlocks();
  for (MachineBasicBlock *BB : Blocks) {
    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isEHPad()) continue;

    // Conservatively treat live-ins as external defs.
    // FIXME: That means a reload that is reused in successor block(s) will not
    // be LICM'ed.
    for (const auto &LI : BB->liveins()) {
      for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI)
        PhysRegDefs.set(*AI);
    }

    SpeculationState = SpeculateUnknown;
    for (MachineInstr &MI : *BB)
      ProcessMI(&MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
  }

  // Gather the registers read / clobbered by the terminator.
  BitVector TermRegs(NumRegs);
  MachineBasicBlock::iterator TI = Preheader->getFirstTerminator();
  if (TI != Preheader->end()) {
    for (const MachineOperand &MO : TI->operands()) {
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        TermRegs.set(*AI);
    }
  }

  // Now evaluate whether the potential candidates qualify.
  // 1. Check if the candidate defined register is defined by another
  //    instruction in the loop.
  // 2. If the candidate is a load from stack slot (always true for now),
  //    check if the slot is stored anywhere in the loop.
  // 3. Make sure the candidate's def does not clobber registers read by the
  //    terminator, and is not itself clobbered by the terminator.
  for (CandidateInfo &Candidate : Candidates) {
    if (Candidate.FI != INT_MIN &&
        StoredFIs.count(Candidate.FI))
      continue;

    unsigned Def = Candidate.Def;
    if (!PhysRegClobbers.test(Def) && !TermRegs.test(Def)) {
      bool Safe = true;
      MachineInstr *MI = Candidate.MI;
      for (const MachineOperand &MO : MI->operands()) {
        if (!MO.isReg() || MO.isDef() || !MO.getReg())
          continue;
        unsigned Reg = MO.getReg();
        if (PhysRegDefs.test(Reg) ||
            PhysRegClobbers.test(Reg)) {
          // If it's using a non-loop-invariant register, then it's obviously
          // not safe to hoist.
          Safe = false;
          break;
        }
      }
      if (Safe)
        HoistPostRA(MI, Candidate.Def);
    }
  }
}

/// Add register 'Reg' to the livein sets of BBs in the current loop, and make
/// sure it is not killed by any instructions in the loop.
void MachineLICM::AddToLiveIns(unsigned Reg) {
  const std::vector<MachineBasicBlock *> &Blocks = CurLoop->getBlocks();
  for (MachineBasicBlock *BB : Blocks) {
    if (!BB->isLiveIn(Reg))
      BB->addLiveIn(Reg);
    for (MachineInstr &MI : *BB) {
      for (MachineOperand &MO : MI.operands()) {
        if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
        if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
          MO.setIsKill(false);
      }
    }
  }
}

/// When an instruction is found to use only loop invariant operands that are
/// safe to hoist, this function is called to do the dirty work.
void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
  MachineBasicBlock *Preheader = getCurPreheader();

  // Now move the instruction to the preheader, inserting it before any
  // terminator instructions.
  DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
               << MI->getParent()->getNumber() << ": " << *MI);

  // Splice the instruction to the preheader.
  MachineBasicBlock *MBB = MI->getParent();
  Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);

  // Add the register to the livein list of all the BBs in the current loop
  // since a loop invariant must be kept live throughout the whole loop. This
  // is important to ensure later passes do not scavenge the def register.
  AddToLiveIns(Def);

  ++NumPostRAHoisted;
  Changed = true;
}

/// Check if this MBB is guaranteed to execute. If not, then a load from this
/// MBB may not be safe to hoist.
bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
  if (SpeculationState != SpeculateUnknown)
    return SpeculationState == SpeculateFalse;

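  // The loop header is always executed whenever the loop is entered, so only
  // non-header blocks need the dominance check below.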
  if (BB != CurLoop->getHeader()) {
    // Check loop exiting blocks.
    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
    for (MachineBasicBlock *CurrentLoopExitingBlock : CurrentLoopExitingBlocks)
      if (!DT->dominates(BB, CurrentLoopExitingBlock)) {
        SpeculationState = SpeculateTrue;
        return false;
      }
  }

  SpeculationState = SpeculateFalse;
  return true;
}

void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Entering BB#" << MBB->getNumber() << '\n');

  // Remember livein register pressure.
  BackTrace.push_back(RegPressure);
}

void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Exiting BB#" << MBB->getNumber() << '\n');
  BackTrace.pop_back();
}

/// Destroy the scope for the MBB that corresponds to the given dominator tree
/// node if it is a leaf or all of its children are done. Walk up the dominator
/// tree to destroy ancestors which are now done.
void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
  if (OpenChildren[Node])
    return;

  // Pop scope.
  ExitScope(Node->getBlock());

  // Now traverse upwards to pop ancestors whose children are all done.
  while (MachineDomTreeNode *Parent = ParentMap[Node]) {
    unsigned Left = --OpenChildren[Parent];
    if (Left != 0)
      break;
    ExitScope(Parent->getBlock());
    Node = Parent;
  }
}

/// Walk the specified loop in the CFG (defined by all blocks dominated by the
/// specified header block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  SmallVector<MachineDomTreeNode*, 32> Scopes;
  SmallVector<MachineDomTreeNode*, 8> WorkList;
  DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;

  // Perform a DFS walk to determine the order of visit.
  WorkList.push_back(HeaderN);
  while (!WorkList.empty()) {
    MachineDomTreeNode *Node = WorkList.pop_back_val();
    assert(Node && "Null dominator tree node?");
    MachineBasicBlock *BB = Node->getBlock();

    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isEHPad())
      continue;

    // If this subregion is not in the top level loop at all, skip it.
    if (!CurLoop->contains(BB))
      continue;

    Scopes.push_back(Node);
    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();

    // Don't hoist things out of a large switch statement.  This often causes
    // code to be hoisted that wasn't going to be executed, and increases
    // register pressure in a situation where it's likely to matter.
    if (BB->succ_size() >= 25)
      NumChildren = 0;

    OpenChildren[Node] = NumChildren;
    // Add children in reverse order as then the next popped worklist node is
    // the first child of this node.  This means we ultimately traverse the
    // DOM tree in exactly the same order as if we'd recursed.
    for (int i = (int)NumChildren-1; i >= 0; --i) {
      MachineDomTreeNode *Child = Children[i];
      ParentMap[Child] = Node;
      WorkList.push_back(Child);
    }
  }

  if (Scopes.empty())
    return;

  // Compute registers which are livein into the loop headers.
  RegSeen.clear();
  BackTrace.clear();
  InitRegPressure(Preheader);

  // Now perform LICM.
  for (MachineDomTreeNode *Node : Scopes) {
    MachineBasicBlock *MBB = Node->getBlock();

    EnterScope(MBB);

    // Process the block
    SpeculationState = SpeculateUnknown;
    for (MachineBasicBlock::iterator
         MII = MBB->begin(), E = MBB->end(); MII != E; ) {
      MachineBasicBlock::iterator NextMII = MII; ++NextMII;
      MachineInstr *MI = &*MII;
      if (!Hoist(MI, Preheader))
        UpdateRegPressure(MI);
      MII = NextMII;
    }

    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
    ExitScopeIfDone(Node, OpenChildren, ParentMap);
  }
}

/// Sink instructions into loops if profitable. This especially tries to prevent
/// register spills caused by register pressure if there is little to no
/// overhead moving instructions into loops.
void MachineLICM::SinkIntoLoop() {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  SmallVector<MachineInstr *, 8> Candidates;
  for (MachineBasicBlock::instr_iterator I = Preheader->instr_begin();
       I != Preheader->instr_end(); ++I) {
    // We need to ensure that we can safely move this instruction into the loop.
    // As such, it must not have side effects, such as those of a call.
    if (IsLoopInvariantInst(*I) && !HasLoopPHIUse(&*I))
      Candidates.push_back(&*I);
  }

  for (MachineInstr *I : Candidates) {
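    // Only sink instructions whose first operand is a virtual register def
    // with a single definition; that way the one def still dominates all of
    // its uses after the instruction is placed at a common dominator of them.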
    const MachineOperand &MO = I->getOperand(0);
    if (!MO.isDef() || !MO.isReg() || !MO.getReg())
      continue;
    if (!MRI->hasOneDef(MO.getReg()))
      continue;
    bool CanSink = true;
    MachineBasicBlock *B = nullptr;
    for (MachineInstr &MI : MRI->use_instructions(MO.getReg())) {
      // FIXME: Come up with a proper cost model that estimates whether sinking
      // the instruction (and thus possibly executing it on every loop
      // iteration) is more expensive than keeping the value live in a register
      // across the loop. For now, assume that copies are cheap and thus almost
      // always worth it.
      if (!MI.isCopy()) {
        CanSink = false;
        break;
      }
      if (!B) {
        B = MI.getParent();
        continue;
      }
      B = DT->findNearestCommonDominator(B, MI.getParent());
      if (!B) {
        CanSink = false;
        break;
      }
    }
    if (!CanSink || !B || B == Preheader)
      continue;
    B->splice(B->getFirstNonPHI(), Preheader, I);
  }
}

static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
}

/// Find all virtual register references that are live out of the preheader to
/// initialize the starting "register pressure". Note this does not count
/// live-through (live-in but not used) registers.
void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
  std::fill(RegPressure.begin(), RegPressure.end(), 0);

  // If the preheader has only a single predecessor and it ends with a
  // fallthrough or an unconditional branch, then scan its predecessor for live
  // defs as well. This happens whenever the preheader is created by splitting
  // the critical edge from the loop predecessor to the loop header.
  if (BB->pred_size() == 1) {
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    SmallVector<MachineOperand, 4> Cond;
    if (!TII->analyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
      InitRegPressure(*BB->pred_begin());
  }

  for (const MachineInstr &MI : *BB)
    UpdateRegPressure(&MI, /*ConsiderUnseenAsDef=*/true);
}

/// Update estimate of register pressure after the specified instruction.
void MachineLICM::UpdateRegPressure(const MachineInstr *MI,
                                    bool ConsiderUnseenAsDef) {
  auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef);
  for (const auto &RPIdAndCost : Cost) {
    unsigned Class = RPIdAndCost.first;
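    // The pressure estimate is unsigned; if a negative cost (from a kill)
    // would drive it below zero, clamp it at zero instead.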
    if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second)
      RegPressure[Class] = 0;
    else
      RegPressure[Class] += RPIdAndCost.second;
  }
}

/// Calculate the additional register pressure that the registers used in MI
/// cause.
///
/// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
/// figure out which usages are live-ins.
/// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
DenseMap<unsigned, int>
MachineLICM::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
                              bool ConsiderUnseenAsDef) {
  DenseMap<unsigned, int> Cost;
  if (MI->isImplicitDef())
    return Cost;
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // FIXME: It seems bad to use RegSeen only for some of these calculations.
    bool isNew = ConsiderSeen ? RegSeen.insert(Reg).second : false;
    const TargetRegisterClass *RC = MRI->getRegClass(Reg);

    RegClassWeight W = TRI->getRegClassWeight(RC);
    int RCCost = 0;
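    // Sign convention: a def (or a use of a register not seen before, when
    // ConsiderUnseenAsDef is set) adds the register class weight; a kill of an
    // already-seen register subtracts it.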
    if (MO.isDef())
      RCCost = W.RegWeight;
    else {
      bool isKill = isOperandKill(MO, MRI);
      if (isNew && !isKill && ConsiderUnseenAsDef)
        // Haven't seen this, it must be a livein.
        RCCost = W.RegWeight;
      else if (!isNew && isKill)
        RCCost = -W.RegWeight;
    }
    if (RCCost == 0)
      continue;
    const int *PS = TRI->getRegClassPressureSets(RC);
    for (; *PS != -1; ++PS) {
      if (Cost.find(*PS) == Cost.end())
        Cost[*PS] = RCCost;
      else
        Cost[*PS] += RCCost;
    }
  }
  return Cost;
}

/// Return true if this machine instruction loads from the global offset table
/// or a constant pool.
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  assert(MI.mayLoad() && "Expected MI that loads!");

  // If we lost memory operands, conservatively assume that the instruction
  // reads from everything.
  if (MI.memoperands_empty())
    return true;

  for (MachineMemOperand *MemOp : MI.memoperands())
    if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
      if (PSV->isGOT() || PSV->isConstantPool())
        return true;

  return false;
}

/// Returns true if the instruction may be a suitable candidate for LICM.
/// e.g., if the instruction is a call, then it's obviously not safe to hoist
/// it.
bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
  // Check if it's safe to move the instruction.
  bool DontMoveAcrossStore = true;
  if (!I.isSafeToMove(AA, DontMoveAcrossStore))
    return false;

  // If it is a load, then check if it is guaranteed to execute by making sure
  // that it dominates all exiting blocks. If it doesn't, then there is a path
  // out of the loop which does not execute this load, so we can't hoist it.
  // Loads from constant memory are not always safe to speculate, for example
  // an indexed load from a jump table.
  // Stores and side effects are already checked by isSafeToMove.
  if (I.mayLoad() && !mayLoadFromGOTOrConstantPool(I) &&
      !IsGuaranteedToExecute(I.getParent()))
    return false;

  return true;
}

/// Returns true if the instruction is loop invariant.
/// I.e., all virtual register operands are defined outside of the loop,
/// physical registers aren't accessed explicitly, and there are no side
/// effects that aren't captured by the operands or other flags.
///
bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
  if (!IsLICMCandidate(I))
    return false;

  // The instruction is loop invariant if all of its operands are.
  for (const MachineOperand &MO : I.operands()) {
    if (!MO.isReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    // Don't hoist an instruction that uses or defines a physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
          return false;
        // Otherwise it's safe to move.
        continue;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return false;
      } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
        // If the reg is live into the loop, we can't hoist an instruction
        // which would clobber it.
        return false;
      }
    }

    if (!MO.isUse())
      continue;

    assert(MRI->getVRegDef(Reg) &&
           "Machine instr not mapped for this vreg?!");

    // If the loop contains the definition of an operand, then the instruction
    // isn't loop invariant.
    if (CurLoop->contains(MRI->getVRegDef(Reg)))
      return false;
  }

  // If we got this far, the instruction is loop invariant!
  return true;
}


/// Return true if the specified instruction is used by a phi node and hoisting
/// it could cause a copy to be inserted.
bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
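  // Work list seeded with MI; in-loop copies of its defs are added below so
  // that a PHI use reached through a chain of copies is also detected.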
  SmallVector<const MachineInstr*, 8> Work(1, MI);
  do {
    MI = Work.pop_back_val();
    for (const MachineOperand &MO : MI->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;
      for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
        // A PHI may cause a copy to be inserted.
        if (UseMI.isPHI()) {
          // A PHI inside the loop causes a copy because the live range of Reg is
          // extended across the PHI.
          if (CurLoop->contains(&UseMI))
            return true;
          // A PHI in an exit block can cause a copy to be inserted if the PHI
          // has multiple predecessors in the loop with different values.
          // For now, approximate by rejecting all exit blocks.
          if (isExitBlock(UseMI.getParent()))
            return true;
          continue;
        }
        // Look past copies as well.
        if (UseMI.isCopy() && CurLoop->contains(&UseMI))
          Work.push_back(&UseMI);
      }
    }
  } while (!Work.empty());
  return false;
}

/// Compute the operand latency between a def of 'Reg' and a use in the current
/// loop; return true if the target considers it high.
bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
                                        unsigned DefIdx, unsigned Reg) const {
  if (MRI->use_nodbg_empty(Reg))
    return false;

  for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
    if (UseMI.isCopyLike())
      continue;
    if (!CurLoop->contains(UseMI.getParent()))
      continue;
    for (unsigned i = 0, e = UseMI.getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = UseMI.getOperand(i);
      if (!MO.isReg() || !MO.isUse())
        continue;
      unsigned MOReg = MO.getReg();
      if (MOReg != Reg)
        continue;

      if (TII->hasHighOperandLatency(SchedModel, MRI, MI, DefIdx, UseMI, i))
        return true;
    }

    // Only look at the first use inside the loop.
    break;
  }

  return false;
}

/// Return true if the instruction is marked "cheap" or the operand latency
/// between its def and a use is one or less.
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
  if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike())
    return true;

  bool isCheap = false;
  unsigned NumDefs = MI.getDesc().getNumDefs();
  for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
    MachineOperand &DefMO = MI.getOperand(i);
    if (!DefMO.isReg() || !DefMO.isDef())
      continue;
    --NumDefs;
    unsigned Reg = DefMO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    if (!TII->hasLowDefLatency(SchedModel, MI, i))
      return false;
    isCheap = true;
  }

  return isCheap;
}

/// Visit BBs from the loop header to the current BB and check whether hoisting
/// an instruction with the given register cost can cause high register
/// pressure.
bool MachineLICM::CanCauseHighRegPressure(const DenseMap<unsigned, int>& Cost,
                                          bool CheapInstr) {
  for (const auto &RPIdAndCost : Cost) {
    if (RPIdAndCost.second <= 0)
      continue;

    unsigned Class = RPIdAndCost.first;
    int Limit = RegLimit[Class];

    // Don't hoist cheap instructions if they would increase register pressure,
    // even if we're under the limit.
    if (CheapInstr && !HoistCheapInsts)
      return true;

    for (const auto &RP : BackTrace)
      if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit)
        return true;
  }

  return false;
}

/// Traverse the back trace from header to the current block and update their
/// register pressures to reflect the effect of hoisting MI from the current
/// block to the preheader.
void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
  // First compute the 'cost' of the instruction, i.e. its contribution
  // to register pressure.
  auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/false,
                               /*ConsiderUnseenAsDef=*/false);

  // Update register pressure of blocks from loop header to current block.
  for (auto &RP : BackTrace)
    for (const auto &RPIdAndCost : Cost)
      RP[RPIdAndCost.first] += RPIdAndCost.second;
}

/// Return true if it is potentially profitable to hoist the given loop
/// invariant.
bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
  if (MI.isImplicitDef())
    return true;

  // Besides removing computation from the loop, hoisting an instruction has
  // these effects:
  //
  // - The value defined by the instruction becomes live across the entire
  //   loop. This increases register pressure in the loop.
  //
  // - If the value is used by a PHI in the loop, a copy will be required for
  //   lowering the PHI after extending the live range.
  //
  // - When hoisting the last use of a value in the loop, that value no longer
  //   needs to be live in the loop. This lowers register pressure in the loop.

  bool CheapInstr = IsCheapInstruction(MI);
  bool CreatesCopy = HasLoopPHIUse(&MI);

  // Don't hoist a cheap instruction if it would create a copy in the loop.
  if (CheapInstr && CreatesCopy) {
    DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
    return false;
  }

  // Rematerializable instructions should always be hoisted since the register
  // allocator can just pull them down again when needed.
  if (TII->isTriviallyReMaterializable(MI, AA))
    return true;

  // FIXME: If there are long latency loop-invariant instructions inside the
  // loop at this point, why didn't the optimizer's LICM hoist them?
  for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
      DEBUG(dbgs() << "Hoist High Latency: " << MI);
      ++NumHighLatency;
      return true;
    }
  }

  // Estimate register pressure to determine whether to LICM the instruction.
  // In a low register pressure situation, we can be more aggressive about
  // hoisting. Also, favor hoisting long latency instructions even in
  // moderately high pressure situations.
  // Cheap instructions will only be hoisted if they don't increase register
  // pressure at all.
  auto Cost = calcRegisterCost(&MI, /*ConsiderSeen=*/false,
                               /*ConsiderUnseenAsDef=*/false);

  // Visit BBs from header to current BB, if hoisting this doesn't cause
  // high register pressure, then it's safe to proceed.
  if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
    DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
    ++NumLowRP;
    return true;
  }

  // Don't risk increasing register pressure if it would create copies.
  if (CreatesCopy) {
    DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
    return false;
  }

  // Do not "speculate" in high register pressure situation. If an
  // instruction is not guaranteed to be executed in the loop, it's best to be
  // conservative.
  if (AvoidSpeculation &&
      (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
    DEBUG(dbgs() << "Won't speculate: " << MI);
    return false;
  }

  // High register pressure situation, only hoist if the instruction is going
  // to be remat'ed.
  if (!TII->isTriviallyReMaterializable(MI, AA) && !MI.isInvariantLoad(AA)) {
    DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
    return false;
  }

  return true;
}

/// Unfold a load from the given MachineInstr if the load itself could be
/// hoisted. Return the unfolded and hoistable load, or null if the load
/// couldn't be unfolded or if it wouldn't be hoistable.
MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
  // Don't unfold simple loads.
  if (MI->canFoldAsLoad())
    return nullptr;

  // If not, we may be able to unfold a load and hoist that.
  // First test whether the instruction is loading from an amenable
  // memory location.
  if (!MI->isInvariantLoad(AA))
    return nullptr;

  // Next determine the register class for a temporary register.
  unsigned LoadRegIndex;
  unsigned NewOpc =
    TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
                                    /*UnfoldLoad=*/true,
                                    /*UnfoldStore=*/false,
                                    &LoadRegIndex);
  if (NewOpc == 0) return nullptr;
  const MCInstrDesc &MID = TII->get(NewOpc);
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
  // Ok, we're unfolding. Create a temporary register and do the unfold.
  unsigned Reg = MRI->createVirtualRegister(RC);

  SmallVector<MachineInstr *, 2> NewMIs;
  bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg,
                                          /*UnfoldLoad=*/true,
                                          /*UnfoldStore=*/false, NewMIs);
  (void)Success;
  assert(Success &&
         "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
         "succeeded!");
  assert(NewMIs.size() == 2 &&
         "Unfolded a load into multiple instructions!");
  MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::iterator Pos = MI;
  MBB->insert(Pos, NewMIs[0]);
  MBB->insert(Pos, NewMIs[1]);
  // If unfolding produced a load that wasn't loop-invariant or profitable to
  // hoist, discard the new instructions and bail.
  if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
    NewMIs[0]->eraseFromParent();
    NewMIs[1]->eraseFromParent();
    return nullptr;
  }

  // Update register pressure for the unfolded instruction.
  UpdateRegPressure(NewMIs[1]);

  // Otherwise we successfully unfolded a load that we can hoist.
  MI->eraseFromParent();
  return NewMIs[0];
}

/// Initialize the CSE map with instructions that are in the current loop
/// preheader that may become duplicates of instructions that are hoisted
/// out of the loop.
void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
  for (MachineInstr &MI : *BB)
    CSEMap[MI.getOpcode()].push_back(&MI);
}

/// Find an instruction among PrevMIs that is a duplicate of MI.
/// Return this instruction if it's found.
const MachineInstr*
MachineLICM::LookForDuplicate(const MachineInstr *MI,
                              std::vector<const MachineInstr*> &PrevMIs) {
  for (const MachineInstr *PrevMI : PrevMIs)
    if (TII->produceSameValue(*MI, *PrevMI, (PreRegAlloc ? MRI : nullptr)))
      return PrevMI;

  return nullptr;
}

/// Given a LICM'ed instruction, look for an instruction in the preheader that
/// computes the same value. If one is found, replace all uses of the hoisted
/// instruction's definitions with the definitions of the existing instruction,
/// rather than hoisting the instruction to the preheader.
bool MachineLICM::EliminateCSE(MachineInstr *MI,
          DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  // the undef property onto uses.
  if (CI == CSEMap.end() || MI->isImplicitDef())
    return false;

  if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
    DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);

    // Replace virtual registers defined by MI by their counterparts defined
    // by Dup.
    SmallVector<unsigned, 2> Defs;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      // Physical registers may not differ here.
      assert((!MO.isReg() || MO.getReg() == 0 ||
              !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
              MO.getReg() == Dup->getOperand(i).getReg()) &&
             "Instructions with different phys regs are not identical!");

      if (MO.isReg() && MO.isDef() &&
          !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
        Defs.push_back(i);
    }

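    // Constrain each duplicate's def register class to be compatible with the
    // corresponding def of MI, remembering the original classes so they can be
    // restored if any constraint fails.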
    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
      unsigned Idx = Defs[i];
      unsigned Reg = MI->getOperand(Idx).getReg();
      unsigned DupReg = Dup->getOperand(Idx).getReg();
      OrigRCs.push_back(MRI->getRegClass(DupReg));

      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
        // Restore the old register classes if there is more than one def.
        for (unsigned j = 0; j != i; ++j)
          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
        return false;
      }
    }

    for (unsigned Idx : Defs) {
      unsigned Reg = MI->getOperand(Idx).getReg();
      unsigned DupReg = Dup->getOperand(Idx).getReg();
      MRI->replaceRegWith(Reg, DupReg);
      MRI->clearKillFlags(DupReg);
    }

    MI->eraseFromParent();
    ++NumCSEed;
    return true;
  }
  return false;
}

/// Return true if the given instruction will be CSE'd if it's hoisted out of
/// the loop.
bool MachineLICM::MayCSE(MachineInstr *MI) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
    CI = CSEMap.find(Opcode);
  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  // the undef property onto uses.
  if (CI == CSEMap.end() || MI->isImplicitDef())
    return false;

  return LookForDuplicate(MI, CI->second) != nullptr;
}

/// When an instruction is found to use only loop invariant operands
/// that are safe to hoist, this function is called to do the dirty work.
/// It returns true if the instruction is hoisted.
bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
  // First check whether we should hoist this instruction.
  if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
    // If not, try unfolding a hoistable load.
    MI = ExtractHoistableLoad(MI);
    if (!MI) return false;
  }

  // Now move the instruction to the preheader, inserting it before any
  // terminator instructions.
  DEBUG({
      dbgs() << "Hoisting " << *MI;
      if (MI->getParent()->getBasicBlock())
        dbgs() << " from BB#" << MI->getParent()->getNumber();
      if (Preheader->getBasicBlock())
        dbgs() << " to BB#" << Preheader->getNumber();
      dbgs() << "\n";
    });

  // If this is the first instruction being hoisted to the preheader,
  // initialize the CSE map with potential common expressions.
  if (FirstInLoop) {
    InitCSEMap(Preheader);
    FirstInLoop = false;
  }

  // Look for opportunity to CSE the hoisted instruction.
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
    CI = CSEMap.find(Opcode);
  if (!EliminateCSE(MI, CI)) {
    // Otherwise, splice the instruction to the preheader.
    Preheader->splice(Preheader->getFirstTerminator(), MI->getParent(), MI);

    // Update register pressure for BBs from header to this block.
    UpdateBackTraceRegPressure(MI);

    // Clear the kill flags of any register this instruction defines,
    // since they may need to be live throughout the entire loop
    // rather than just live for part of it.
    for (MachineOperand &MO : MI->operands())
      if (MO.isReg() && MO.isDef() && !MO.isDead())
        MRI->clearKillFlags(MO.getReg());

    // Add to the CSE map.
    if (CI != CSEMap.end())
      CI->second.push_back(MI);
    else
      CSEMap[Opcode].push_back(MI);
  }

  ++NumHoisted;
  Changed = true;

  return true;
}

/// Get the preheader for the current loop, splitting a critical edge if needed.
MachineBasicBlock *MachineLICM::getCurPreheader() {
  // Determine the block to which to hoist instructions. If we can't find a
  // suitable loop predecessor, we can't do any hoisting.

  // If we've tried to get a preheader and failed, don't try again.
  if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
    return nullptr;

  if (!CurPreheader) {
    CurPreheader = CurLoop->getLoopPreheader();
    if (!CurPreheader) {
      MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
      if (!Pred) {
        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
        return nullptr;
      }

      CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), *this);
      if (!CurPreheader) {
        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
        return nullptr;
      }
    }
  }
  return CurPreheader;
}
