MachineLICM.cpp revision d0848a6398e0830898463ceb0041d4d7b163512d
1//===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass performs loop invariant code motion on machine instructions. We
11// attempt to remove as much code from the body of a loop as possible.
12//
13// This pass does not attempt to throttle itself to limit register pressure.
14// The register allocation phases are expected to perform rematerialization
15// to recover when register pressure is high.
16//
17// This pass is not intended to be a replacement or a complete alternative
18// for the LLVM-IR-level LICM pass. It is only designed to hoist simple
19// constructs that are not exposed before lowering and instruction selection.
20//
21//===----------------------------------------------------------------------===//
22
23#define DEBUG_TYPE "machine-licm"
24#include "llvm/CodeGen/Passes.h"
25#include "llvm/CodeGen/MachineDominators.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/PseudoSourceValue.h"
31#include "llvm/MC/MCInstrItineraries.h"
32#include "llvm/Target/TargetLowering.h"
33#include "llvm/Target/TargetRegisterInfo.h"
34#include "llvm/Target/TargetInstrInfo.h"
35#include "llvm/Target/TargetMachine.h"
36#include "llvm/Analysis/AliasAnalysis.h"
37#include "llvm/ADT/DenseMap.h"
38#include "llvm/ADT/SmallSet.h"
39#include "llvm/ADT/Statistic.h"
40#include "llvm/Support/CommandLine.h"
41#include "llvm/Support/Debug.h"
42#include "llvm/Support/raw_ostream.h"
43using namespace llvm;
44
45static cl::opt<bool>
46AvoidSpeculation("avoid-speculation",
47                 cl::desc("MachineLICM should avoid speculation"),
48                 cl::init(true), cl::Hidden);
49
50STATISTIC(NumHoisted,
51          "Number of machine instructions hoisted out of loops");
52STATISTIC(NumLowRP,
53          "Number of instructions hoisted in low reg pressure situation");
54STATISTIC(NumHighLatency,
55          "Number of high latency instructions hoisted");
56STATISTIC(NumCSEed,
57          "Number of hoisted machine instructions CSEed");
58STATISTIC(NumPostRAHoisted,
59          "Number of machine instructions hoisted out of loops post regalloc");
60
61namespace {
62  class MachineLICM : public MachineFunctionPass {
63    bool PreRegAlloc;
64
65    const TargetMachine   *TM;
66    const TargetInstrInfo *TII;
67    const TargetLowering *TLI;
68    const TargetRegisterInfo *TRI;
69    const MachineFrameInfo *MFI;
70    MachineRegisterInfo *MRI;
71    const InstrItineraryData *InstrItins;
72
73    // Various analyses that we use...
74    AliasAnalysis        *AA;      // Alias analysis info.
75    MachineLoopInfo      *MLI;     // Current MachineLoopInfo
76    MachineDominatorTree *DT;      // Machine dominator tree for the cur loop
77
78    // State that is updated as we process loops
79    bool         Changed;          // True if a loop is changed.
80    bool         FirstInLoop;      // True if it's the first LICM in the loop.
81    MachineLoop *CurLoop;          // The current loop we are working on.
82    MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
83
84    // Track 'estimated' register pressure.
85    SmallSet<unsigned, 32> RegSeen;
86    SmallVector<unsigned, 8> RegPressure;
87
88    // Register pressure "limit" per register class. If the pressure
89    // is higher than the limit, then it's considered high.
90    SmallVector<unsigned, 8> RegLimit;
91
92    // Register pressure on path leading from loop preheader to current BB.
93    SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
94
95    // For each opcode, keep a list of potential CSE instructions.
96    DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
97
98    enum {
99      SpeculateFalse   = 0,
100      SpeculateTrue    = 1,
101      SpeculateUnknown = 2
102    };
103
104    // If an MBB does not dominate all loop exiting blocks, then it may not be
105    // safe to hoist loads from this block.
106    // Tri-state: 0 - false, 1 - true, 2 - unknown
107    unsigned SpeculationState;
108
109  public:
110    static char ID; // Pass identification, replacement for typeid
111    MachineLICM() :
112      MachineFunctionPass(ID), PreRegAlloc(true) {
113        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
114      }
115
116    explicit MachineLICM(bool PreRA) :
117      MachineFunctionPass(ID), PreRegAlloc(PreRA) {
118        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
119      }
120
121    virtual bool runOnMachineFunction(MachineFunction &MF);
122
123    const char *getPassName() const { return "Machine Instruction LICM"; }
124
125    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
126      AU.addRequired<MachineLoopInfo>();
127      AU.addRequired<MachineDominatorTree>();
128      AU.addRequired<AliasAnalysis>();
129      AU.addPreserved<MachineLoopInfo>();
130      AU.addPreserved<MachineDominatorTree>();
131      MachineFunctionPass::getAnalysisUsage(AU);
132    }
133
134    virtual void releaseMemory() {
135      RegSeen.clear();
136      RegPressure.clear();
137      RegLimit.clear();
138      BackTrace.clear();
139      for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
140             CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
141        CI->second.clear();
142      CSEMap.clear();
143    }
144
145  private:
146    /// CandidateInfo - Keep track of information about hoisting candidates.
147    struct CandidateInfo {
148      MachineInstr *MI;
149      unsigned      Def;
150      int           FI;
151      CandidateInfo(MachineInstr *mi, unsigned def, int fi)
152        : MI(mi), Def(def), FI(fi) {}
153    };
154
155    /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
156    /// invariants out to the preheader.
157    void HoistRegionPostRA();
158
159    /// HoistPostRA - When an instruction is found to use only loop invariant
160    /// operands that are safe to hoist, this function is called to do the
161    /// dirty work.
162    void HoistPostRA(MachineInstr *MI, unsigned Def);
163
164    /// ProcessMI - Examine the instruction as a potential LICM candidate. Also
165    /// gather register def and frame object update information.
166    void ProcessMI(MachineInstr *MI,
167                   BitVector &PhysRegDefs,
168                   BitVector &PhysRegClobbers,
169                   SmallSet<int, 32> &StoredFIs,
170                   SmallVector<CandidateInfo, 32> &Candidates);
171
172    /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
173    /// current loop.
174    void AddToLiveIns(unsigned Reg);
175
176    /// IsLICMCandidate - Returns true if the instruction may be a suitable
177    /// candidate for LICM. E.g., if the instruction is a call, then it's
178    /// obviously not safe to hoist it.
179    bool IsLICMCandidate(MachineInstr &I);
180
181    /// IsLoopInvariantInst - Returns true if the instruction is loop
182    /// invariant. I.e., all virtual register operands are defined outside of
183    /// the loop, physical registers aren't accessed (explicitly or implicitly),
184    /// and the instruction is hoistable.
185    ///
186    bool IsLoopInvariantInst(MachineInstr &I);
187
188    /// HasAnyPHIUse - Return true if the specified register is used by any
189    /// phi node.
190    bool HasAnyPHIUse(unsigned Reg) const;
191
192    /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
193    /// and a use in the current loop; return true if the target considers
194    /// it 'high'.
195    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
196                               unsigned Reg) const;
197
198    bool IsCheapInstruction(MachineInstr &MI) const;
199
200    /// CanCauseHighRegPressure - Visit BBs from header to current BB,
201    /// check if hoisting an instruction with the given cost set can cause high
202    /// register pressure.
203    bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
204
205    /// UpdateBackTraceRegPressure - Traverse the back trace from header to
206    /// the current block and update their register pressures to reflect the
207    /// effect of hoisting MI from the current block to the preheader.
208    void UpdateBackTraceRegPressure(const MachineInstr *MI);
209
210    /// IsProfitableToHoist - Return true if it is potentially profitable to
211    /// hoist the given loop invariant.
212    bool IsProfitableToHoist(MachineInstr &MI);
213
214    /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
215    /// If not, then a load from this mbb may not be safe to hoist.
216    bool IsGuaranteedToExecute(MachineBasicBlock *BB);
217
218    void EnterScope(MachineBasicBlock *MBB);
219
220    void ExitScope(MachineBasicBlock *MBB);
221
222    /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to given
223    /// dominator tree node if it's a leaf or all of its children are done. Walk
224    /// up the dominator tree to destroy ancestors which are now done.
225    void ExitScopeIfDone(MachineDomTreeNode *Node,
226                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
227                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
228
229    /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
230    /// blocks dominated by the specified header block, and that are in the
231    /// current loop) in depth first order w.r.t the DominatorTree. This allows
232    /// us to visit definitions before uses, allowing us to hoist a loop body in
233    /// one pass without iteration.
234    ///
235    void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
236    void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
237
238    /// getRegisterClassIDAndCost - For a given MI, register, and the operand
239    /// index, return the ID and cost of its representative register class by
240    /// reference.
241    void getRegisterClassIDAndCost(const MachineInstr *MI,
242                                   unsigned Reg, unsigned OpIdx,
243                                   unsigned &RCId, unsigned &RCCost) const;
244
245    /// InitRegPressure - Find all virtual register references that are liveout
246    /// of the preheader to initialize the starting "register pressure". Note
247    /// this does not count live through (livein but not used) registers.
248    void InitRegPressure(MachineBasicBlock *BB);
249
250    /// UpdateRegPressure - Update estimate of register pressure after the
251    /// specified instruction.
252    void UpdateRegPressure(const MachineInstr *MI);
253
254    /// ExtractHoistableLoad - Unfold a load from the given machineinstr if
255    /// the load itself could be hoisted. Return the unfolded and hoistable
256    /// load, or null if the load couldn't be unfolded or if it wouldn't
257    /// be hoistable.
258    MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
259
260    /// LookForDuplicate - Find an instruction among PrevMIs that is a
261    /// duplicate of MI. Return this instruction if it's found.
262    const MachineInstr *LookForDuplicate(const MachineInstr *MI,
263                                     std::vector<const MachineInstr*> &PrevMIs);
264
265    /// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
266    /// the preheader that computes the same value. If one is found, replace all
267    /// uses of MI's definition with the definition of the existing instruction
268    /// rather than hoisting the instruction to the preheader.
269    bool EliminateCSE(MachineInstr *MI,
270           DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
271
272    /// MayCSE - Return true if the given instruction will be CSE'd if it's
273    /// hoisted out of the loop.
274    bool MayCSE(MachineInstr *MI);
275
276    /// Hoist - When an instruction is found to use only loop invariant operands
277    /// that are safe to hoist, this function is called to do the dirty work.
278    /// It returns true if the instruction is hoisted.
279    bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
280
281    /// InitCSEMap - Initialize the CSE map with instructions that are in the
282    /// current loop preheader that may become duplicates of instructions that
283    /// are hoisted out of the loop.
284    void InitCSEMap(MachineBasicBlock *BB);
285
286    /// getCurPreheader - Get the preheader for the current loop, splitting
287    /// a critical edge if needed.
288    MachineBasicBlock *getCurPreheader();
289  };
290} // end anonymous namespace
291
292char MachineLICM::ID = 0;
293INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
294                "Machine Loop Invariant Code Motion", false, false)
295INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
296INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
297INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
298INITIALIZE_PASS_END(MachineLICM, "machinelicm",
299                "Machine Loop Invariant Code Motion", false, false)
300
301FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
302  return new MachineLICM(PreRegAlloc);
303}
304
305/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
306/// loop that has a unique predecessor.
307static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
308  // Check whether this loop even has a unique predecessor.
309  if (!CurLoop->getLoopPredecessor())
310    return false;
311  // Ok, now check to see if any of its outer loops do.
312  for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
313    if (L->getLoopPredecessor())
314      return false;
315  // None of them did, so this is the outermost with a unique predecessor.
316  return true;
317}
318
319bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
320  if (PreRegAlloc)
321    DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
322  else
323    DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
324  DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
325
326  Changed = FirstInLoop = false;
327  TM = &MF.getTarget();
328  TII = TM->getInstrInfo();
329  TLI = TM->getTargetLowering();
330  TRI = TM->getRegisterInfo();
331  MFI = MF.getFrameInfo();
332  MRI = &MF.getRegInfo();
333  InstrItins = TM->getInstrItineraryData();
334
335  if (PreRegAlloc) {
336    // Estimate register pressure during pre-regalloc pass.
337    unsigned NumRC = TRI->getNumRegClasses();
338    RegPressure.resize(NumRC);
339    std::fill(RegPressure.begin(), RegPressure.end(), 0);
340    RegLimit.resize(NumRC);
341    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
342           E = TRI->regclass_end(); I != E; ++I)
343      RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
344  }
345
346  // Get our Loop information...
347  MLI = &getAnalysis<MachineLoopInfo>();
348  DT  = &getAnalysis<MachineDominatorTree>();
349  AA  = &getAnalysis<AliasAnalysis>();
350
351  SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
352  while (!Worklist.empty()) {
353    CurLoop = Worklist.pop_back_val();
354    CurPreheader = 0;
355
356    // If this is done before regalloc, only visit outer-most preheader-sporting
357    // loops.
358    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
359      Worklist.append(CurLoop->begin(), CurLoop->end());
360      continue;
361    }
362
363    if (!PreRegAlloc)
364      HoistRegionPostRA();
365    else {
366      // CSEMap is initialized for loop header when the first instruction is
367      // being hoisted.
368      MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
369      FirstInLoop = true;
370      HoistOutOfLoop(N);
371      CSEMap.clear();
372    }
373  }
374
375  return Changed;
376}
377
378/// InstructionStoresToFI - Return true if instruction stores to the
379/// specified frame index.
380static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
381  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
382         oe = MI->memoperands_end(); o != oe; ++o) {
383    if (!(*o)->isStore() || !(*o)->getValue())
384      continue;
385    if (const FixedStackPseudoSourceValue *Value =
386        dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
387      if (Value->getFrameIndex() == FI)
388        return true;
389    }
390  }
391  return false;
392}
393
394/// ProcessMI - Examine the instruction as a potential LICM candidate. Also
395/// gather register def and frame object update information.
396void MachineLICM::ProcessMI(MachineInstr *MI,
397                            BitVector &PhysRegDefs,
398                            BitVector &PhysRegClobbers,
399                            SmallSet<int, 32> &StoredFIs,
400                            SmallVector<CandidateInfo, 32> &Candidates) {
401  bool RuledOut = false;
402  bool HasNonInvariantUse = false;
403  unsigned Def = 0;
404  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
405    const MachineOperand &MO = MI->getOperand(i);
406    if (MO.isFI()) {
407      // Remember if the instruction stores to the frame index.
408      int FI = MO.getIndex();
409      if (!StoredFIs.count(FI) &&
410          MFI->isSpillSlotObjectIndex(FI) &&
411          InstructionStoresToFI(MI, FI))
412        StoredFIs.insert(FI);
413      HasNonInvariantUse = true;
414      continue;
415    }
416
417    // We can't hoist an instruction defining a physreg that is clobbered in
418    // the loop.
419    if (MO.isRegMask()) {
420      if (const uint32_t *Mask = MO.getRegMask())
421        PhysRegClobbers.setBitsNotInMask(Mask);
422      else
423        PhysRegClobbers.set();
424      continue;
425    }
426
427    if (!MO.isReg())
428      continue;
429    unsigned Reg = MO.getReg();
430    if (!Reg)
431      continue;
432    assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
433           "Not expecting virtual register!");
434
435    if (!MO.isDef()) {
436      if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
437        // If it's using a non-loop-invariant register, then it's obviously not
438        // safe to hoist.
439        HasNonInvariantUse = true;
440      continue;
441    }
442
443    if (MO.isImplicit()) {
444      for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS)
445        PhysRegClobbers.set(*AS);
446      if (!MO.isDead())
447        // Non-dead implicit def? This cannot be hoisted.
448        RuledOut = true;
449      // No need to check if a dead implicit def is also defined by
450      // another instruction.
451      continue;
452    }
453
454    // FIXME: For now, avoid instructions with multiple defs, unless
455    // it's a dead implicit def.
456    if (Def)
457      RuledOut = true;
458    else
459      Def = Reg;
460
461    // If we have already seen another instruction that defines the same
462    // register, then this is not safe.  Two defs are indicated by setting a
463    // PhysRegClobbers bit.
464    for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS) {
465      if (PhysRegDefs.test(*AS))
466        PhysRegClobbers.set(*AS);
467      if (PhysRegClobbers.test(*AS))
468        // The register defined by MI is also defined by another instruction
469        // in the loop, so MI cannot be a LICM candidate.
470        RuledOut = true;
471      PhysRegDefs.set(*AS);
472    }
473  }
474
475  // Only consider reloads for now, and remats which do not have register
476  // operands. FIXME: Consider unfolding load-folding instructions.
477  if (Def && !RuledOut) {
478    int FI = INT_MIN;
479    if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
480        (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
481      Candidates.push_back(CandidateInfo(MI, Def, FI));
482  }
483}
484
485/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
486/// invariants out to the preheader.
487void MachineLICM::HoistRegionPostRA() {
488  unsigned NumRegs = TRI->getNumRegs();
489  BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
490  BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
491
492  SmallVector<CandidateInfo, 32> Candidates;
493  SmallSet<int, 32> StoredFIs;
494
495  // Walk the entire region, count number of defs for each register, and
496  // collect potential LICM candidates.
497  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
498  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
499    MachineBasicBlock *BB = Blocks[i];
500
501    // If the header of the loop containing this basic block is a landing pad,
502    // then don't try to hoist instructions out of this loop.
503    const MachineLoop *ML = MLI->getLoopFor(BB);
504    if (ML && ML->getHeader()->isLandingPad()) continue;
505
506    // Conservatively treat live-ins as external defs.
507    // FIXME: That means a reload that is reused in successor block(s) will not
508    // be LICM'ed.
509    for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
510           E = BB->livein_end(); I != E; ++I) {
511      unsigned Reg = *I;
512      for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS)
513        PhysRegDefs.set(*AS);
514    }
515
516    SpeculationState = SpeculateUnknown;
517    for (MachineBasicBlock::iterator
518           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
519      MachineInstr *MI = &*MII;
520      ProcessMI(MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
521    }
522  }
523
524  // Now evaluate whether the potential candidates qualify.
525  // 1. Check if the candidate defined register is defined by another
526  //    instruction in the loop.
527  // 2. If the candidate is a load from stack slot (always true for now),
528  //    check if the slot is stored anywhere in the loop.
529  for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
530    if (Candidates[i].FI != INT_MIN &&
531        StoredFIs.count(Candidates[i].FI))
532      continue;
533
534    if (!PhysRegClobbers.test(Candidates[i].Def)) {
535      bool Safe = true;
536      MachineInstr *MI = Candidates[i].MI;
537      for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
538        const MachineOperand &MO = MI->getOperand(j);
539        if (!MO.isReg() || MO.isDef() || !MO.getReg())
540          continue;
541        if (PhysRegDefs.test(MO.getReg()) ||
542            PhysRegClobbers.test(MO.getReg())) {
543          // If it's using a non-loop-invariant register, then it's obviously
544          // not safe to hoist.
545          Safe = false;
546          break;
547        }
548      }
549      if (Safe)
550        HoistPostRA(MI, Candidates[i].Def);
551    }
552  }
553}
554
555/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
556/// loop, and make sure it is not killed by any instructions in the loop.
557void MachineLICM::AddToLiveIns(unsigned Reg) {
558  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
559  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
560    MachineBasicBlock *BB = Blocks[i];
561    if (!BB->isLiveIn(Reg))
562      BB->addLiveIn(Reg);
563    for (MachineBasicBlock::iterator
564           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
565      MachineInstr *MI = &*MII;
566      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
567        MachineOperand &MO = MI->getOperand(i);
568        if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
569        if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
570          MO.setIsKill(false);
571      }
572    }
573  }
574}
575
576/// HoistPostRA - When an instruction is found to use only loop invariant
577/// operands that are safe to hoist, this function is called to do the
578/// dirty work.
579void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
580  MachineBasicBlock *Preheader = getCurPreheader();
581  if (!Preheader) return;
582
583  // Now move the instruction to the preheader, inserting it before any
584  // terminator instructions.
585  DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
586               << MI->getParent()->getNumber() << ": " << *MI);
587
588  // Splice the instruction to the preheader.
589  MachineBasicBlock *MBB = MI->getParent();
590  Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
591  // Add the register to the livein lists of all the BBs in the current loop,
592  // since a loop invariant must be kept live throughout the whole loop. This is
593  // important to ensure later passes do not scavenge the def register.
594  // important to ensure later passes do not scavenge the def register.
595  AddToLiveIns(Def);
596
597  ++NumPostRAHoisted;
598  Changed = true;
599}
600
601// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
602// If not, then a load from this mbb may not be safe to hoist.
603bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
604  if (SpeculationState != SpeculateUnknown)
605    return SpeculationState == SpeculateFalse;
606
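  // The loop header always executes. Any other block is guaranteed to execute
  // only if it dominates all of the loop's exiting blocks.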
607  if (BB != CurLoop->getHeader()) {
608    // Check loop exiting blocks.
609    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
610    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
611    for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
612      if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
613        SpeculationState = SpeculateTrue;
614        return false;
615      }
616  }
617
618  SpeculationState = SpeculateFalse;
619  return true;
620}
621
622void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
623  DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
624
625  // Remember livein register pressure.
626  BackTrace.push_back(RegPressure);
627}
628
629void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
630  DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
631  BackTrace.pop_back();
632}
633
634/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
635/// dominator tree node if it's a leaf or all of its children are done. Walk
636/// up the dominator tree to destroy ancestors which are now done.
637void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
638                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
639                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
640  if (OpenChildren[Node])
641    return;
642
643  // Pop scope.
644  ExitScope(Node->getBlock());
645
646  // Now traverse upwards to pop ancestors whose children are all done.
647  while (MachineDomTreeNode *Parent = ParentMap[Node]) {
648    unsigned Left = --OpenChildren[Parent];
649    if (Left != 0)
650      break;
651    ExitScope(Parent->getBlock());
652    Node = Parent;
653  }
654}
655
656/// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
657/// blocks dominated by the specified header block, and that are in the
658/// current loop) in depth first order w.r.t the DominatorTree. This allows
659/// us to visit definitions before uses, allowing us to hoist a loop body in
660/// one pass without iteration.
661///
662void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
663  SmallVector<MachineDomTreeNode*, 32> Scopes;
664  SmallVector<MachineDomTreeNode*, 8> WorkList;
665  DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
666  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
667
668  // Perform a DFS walk to determine the order of visit.
669  WorkList.push_back(HeaderN);
670  do {
671    MachineDomTreeNode *Node = WorkList.pop_back_val();
672    assert(Node != 0 && "Null dominator tree node?");
673    MachineBasicBlock *BB = Node->getBlock();
674
675    // If the header of the loop containing this basic block is a landing pad,
676    // then don't try to hoist instructions out of this loop.
677    const MachineLoop *ML = MLI->getLoopFor(BB);
678    if (ML && ML->getHeader()->isLandingPad())
679      continue;
680
681    // If this subregion is not in the top level loop at all, skip it.
682    if (!CurLoop->contains(BB))
683      continue;
684
685    Scopes.push_back(Node);
686    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
687    unsigned NumChildren = Children.size();
688
689    // Don't hoist things out of a large switch statement.  This often causes
690    // code to be hoisted that wasn't going to be executed, and increases
691    // register pressure in a situation where it's likely to matter.
692    if (BB->succ_size() >= 25)
693      NumChildren = 0;
694
695    OpenChildren[Node] = NumChildren;
696    // Add children in reverse order as then the next popped worklist node is
697    // the first child of this node.  This means we ultimately traverse the
698    // DOM tree in exactly the same order as if we'd recursed.
699    for (int i = (int)NumChildren-1; i >= 0; --i) {
700      MachineDomTreeNode *Child = Children[i];
701      ParentMap[Child] = Node;
702      WorkList.push_back(Child);
703    }
704  } while (!WorkList.empty());
705
706  if (Scopes.size() != 0) {
707    MachineBasicBlock *Preheader = getCurPreheader();
708    if (!Preheader)
709      return;
710
711    // Compute registers which are livein into the loop headers.
712    RegSeen.clear();
713    BackTrace.clear();
714    InitRegPressure(Preheader);
715  }
716
717  // Now perform LICM.
718  for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
719    MachineDomTreeNode *Node = Scopes[i];
720    MachineBasicBlock *MBB = Node->getBlock();
721
722    MachineBasicBlock *Preheader = getCurPreheader();
723    if (!Preheader)
724      continue;
725
726    EnterScope(MBB);
727
728    // Process the block
729    SpeculationState = SpeculateUnknown;
730    for (MachineBasicBlock::iterator
731         MII = MBB->begin(), E = MBB->end(); MII != E; ) {
732      MachineBasicBlock::iterator NextMII = MII; ++NextMII;
733      MachineInstr *MI = &*MII;
734      if (!Hoist(MI, Preheader))
735        UpdateRegPressure(MI);
736      MII = NextMII;
737    }
738
739    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
740    ExitScopeIfDone(Node, OpenChildren, ParentMap);
741  }
742}
743
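/// isOperandKill - Return true if this use operand kills its register: either
/// it carries a kill flag or it is the register's only non-debug use.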
744static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
745  return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
746}
747
748/// getRegisterClassIDAndCost - For a given MI, register, and the operand
749/// index, return the ID and cost of its representative register class.
750void
751MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
752                                       unsigned Reg, unsigned OpIdx,
753                                       unsigned &RCId, unsigned &RCCost) const {
754  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
755  EVT VT = *RC->vt_begin();
756  if (VT == MVT::Untyped) {
757    RCId = RC->getID();
758    RCCost = 1;
759  } else {
760    RCId = TLI->getRepRegClassFor(VT)->getID();
761    RCCost = TLI->getRepRegClassCostFor(VT);
762  }
763}
764
765/// InitRegPressure - Find all virtual register references that are liveout of
766/// the preheader to initialize the starting "register pressure". Note this
767/// does not count live through (livein but not used) registers.
768void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
769  std::fill(RegPressure.begin(), RegPressure.end(), 0);
770
771  // If the preheader has only a single predecessor and it ends with a
772  // fallthrough or an unconditional branch, then scan its predecessor for live
773  // defs as well. This happens whenever the preheader is created by splitting
774  // the critical edge from the loop predecessor to the loop header.
775  if (BB->pred_size() == 1) {
776    MachineBasicBlock *TBB = 0, *FBB = 0;
777    SmallVector<MachineOperand, 4> Cond;
778    if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
779      InitRegPressure(*BB->pred_begin());
780  }
781
782  for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
783       MII != E; ++MII) {
784    MachineInstr *MI = &*MII;
785    for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
786      const MachineOperand &MO = MI->getOperand(i);
787      if (!MO.isReg() || MO.isImplicit())
788        continue;
789      unsigned Reg = MO.getReg();
790      if (!TargetRegisterInfo::isVirtualRegister(Reg))
791        continue;
792
793      bool isNew = RegSeen.insert(Reg);
794      unsigned RCId, RCCost;
795      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
796      if (MO.isDef())
797        RegPressure[RCId] += RCCost;
798      else {
799        bool isKill = isOperandKill(MO, MRI);
800        if (isNew && !isKill)
801          // Haven't seen this, it must be a livein.
802          RegPressure[RCId] += RCCost;
803        else if (!isNew && isKill)
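          // A kill of an already-counted register ends its live range here.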
804          RegPressure[RCId] -= RCCost;
805      }
806    }
807  }
808}
809
810/// UpdateRegPressure - Update estimate of register pressure after the
811/// specified instruction.
812void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
813  if (MI->isImplicitDef())
814    return;
815
816  SmallVector<unsigned, 4> Defs;
817  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
818    const MachineOperand &MO = MI->getOperand(i);
819    if (!MO.isReg() || MO.isImplicit())
820      continue;
821    unsigned Reg = MO.getReg();
822    if (!TargetRegisterInfo::isVirtualRegister(Reg))
823      continue;
824
825    bool isNew = RegSeen.insert(Reg);
826    if (MO.isDef())
827      Defs.push_back(Reg);
828    else if (!isNew && isOperandKill(MO, MRI)) {
829      unsigned RCId, RCCost;
830      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
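      // The pressure numbers are only estimates; clamp at zero rather than
      // letting the unsigned value wrap around.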
831      if (RCCost > RegPressure[RCId])
832        RegPressure[RCId] = 0;
833      else
834        RegPressure[RCId] -= RCCost;
835    }
836  }
837
838  unsigned Idx = 0;
839  while (!Defs.empty()) {
840    unsigned Reg = Defs.pop_back_val();
841    unsigned RCId, RCCost;
842    getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
843    RegPressure[RCId] += RCCost;
844    ++Idx;
845  }
846}
847
848/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
849/// loads from global offset table or constant pool.
850static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
851  assert(MI.mayLoad() && "Expected MI that loads!");
852  for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
853         E = MI.memoperands_end(); I != E; ++I) {
854    if (const Value *V = (*I)->getValue()) {
855      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
856        if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
857          return true;
858    }
859  }
860  return false;
861}
862
863/// IsLICMCandidate - Returns true if the instruction may be a suitable
864/// candidate for LICM. E.g., if the instruction is a call, then it's obviously
865/// not safe to hoist it.
866bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
867  // Check if it's safe to move the instruction.
868  bool DontMoveAcrossStore = true;
869  if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
870    return false;
871
872  // If it is a load, then check if it is guaranteed to execute by making sure
873  // that it dominates all exiting blocks. If it doesn't, then there is a path
874  // out of the loop which does not execute this load, so we can't hoist it.
875  // Loads from constant memory are not always safe to speculate, for example
876  // an indexed load from a jump table.
877  // Stores and side effects are already checked by isSafeToMove.
878  if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
879      !IsGuaranteedToExecute(I.getParent()))
880    return false;
881
882  return true;
883}
884
885/// IsLoopInvariantInst - Returns true if the instruction is loop
886/// invariant. I.e., all virtual register operands are defined outside of the
887/// loop, physical registers aren't accessed explicitly, and there are no side
888/// effects that aren't captured by the operands or other flags.
889///
890bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
891  if (!IsLICMCandidate(I))
892    return false;
893
894  // The instruction is loop invariant if all of its operands are.
895  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
896    const MachineOperand &MO = I.getOperand(i);
897
898    if (!MO.isReg())
899      continue;
900
901    unsigned Reg = MO.getReg();
902    if (Reg == 0) continue;
903
904    // Don't hoist an instruction that uses or defines a physical register.
905    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
906      if (MO.isUse()) {
907        // If the physreg has no defs anywhere, it's just an ambient register
908        // and we can freely move its uses. Alternatively, if it's allocatable,
909        // it could get allocated to something with a def during allocation.
910        if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
911          return false;
912        // Otherwise it's safe to move.
913        continue;
914      } else if (!MO.isDead()) {
915        // A def that isn't dead. We can't move it.
916        return false;
917      } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
918        // If the reg is live into the loop, we can't hoist an instruction
919        // which would clobber it.
920        return false;
921      }
922    }
923
924    if (!MO.isUse())
925      continue;
926
927    assert(MRI->getVRegDef(Reg) &&
928           "Machine instr not mapped for this vreg?!");
929
930    // If the loop contains the definition of an operand, then the instruction
931    // isn't loop invariant.
932    if (CurLoop->contains(MRI->getVRegDef(Reg)))
933      return false;
934  }
935
936  // If we got this far, the instruction is loop invariant!
937  return true;
938}
939
940
941/// HasAnyPHIUse - Return true if the specified register is used by any
942/// phi node.
943bool MachineLICM::HasAnyPHIUse(unsigned Reg) const {
944  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
945         UE = MRI->use_end(); UI != UE; ++UI) {
946    MachineInstr *UseMI = &*UI;
947    if (UseMI->isPHI())
948      return true;
949    // Look past copies as well.
950    if (UseMI->isCopy()) {
951      unsigned Def = UseMI->getOperand(0).getReg();
952      if (TargetRegisterInfo::isVirtualRegister(Def) &&
953          HasAnyPHIUse(Def))
954        return true;
955    }
956  }
957  return false;
958}
959
960/// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
961/// and a use in the current loop; return true if the target considers
962/// it 'high'.
963bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
964                                        unsigned DefIdx, unsigned Reg) const {
965  if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
966    return false;
967
968  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
969         E = MRI->use_nodbg_end(); I != E; ++I) {
970    MachineInstr *UseMI = &*I;
971    if (UseMI->isCopyLike())
972      continue;
973    if (!CurLoop->contains(UseMI->getParent()))
974      continue;
975    for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
976      const MachineOperand &MO = UseMI->getOperand(i);
977      if (!MO.isReg() || !MO.isUse())
978        continue;
979      unsigned MOReg = MO.getReg();
980      if (MOReg != Reg)
981        continue;
982
983      if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
984        return true;
985    }
986
987    // Only look at the first in-loop use.
988    break;
989  }
990
991  return false;
992}
993
994/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
995/// the operand latency between its def and a use is one or less.
996bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
997  if (MI.isAsCheapAsAMove() || MI.isCopyLike())
998    return true;
999  if (!InstrItins || InstrItins->isEmpty())
1000    return false;
1001
1002  bool isCheap = false;
1003  unsigned NumDefs = MI.getDesc().getNumDefs();
1004  for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
1005    MachineOperand &DefMO = MI.getOperand(i);
1006    if (!DefMO.isReg() || !DefMO.isDef())
1007      continue;
1008    --NumDefs;
1009    unsigned Reg = DefMO.getReg();
1010    if (TargetRegisterInfo::isPhysicalRegister(Reg))
1011      continue;
1012
1013    if (!TII->hasLowDefLatency(InstrItins, &MI, i))
1014      return false;
1015    isCheap = true;
1016  }
1017
1018  return isCheap;
1019}
1020
1021/// CanCauseHighRegPressure - Visit BBs from header to current BB, check
1022/// if hoisting an instruction with the given cost set can cause high
1023/// register pressure.
1024bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
1025  for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1026       CI != CE; ++CI) {
1027    if (CI->second <= 0)
1028      continue;
1029
1030    unsigned RCId = CI->first;
1031    unsigned Limit = RegLimit[RCId];
1032    int Cost = CI->second;
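    // Hoisting adds this cost to every block on the path from the loop header
    // to the current block; if any of them would reach the limit, report high
    // register pressure.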
1033    for (unsigned i = BackTrace.size(); i != 0; --i) {
1034      SmallVector<unsigned, 8> &RP = BackTrace[i-1];
1035      if (RP[RCId] + Cost >= Limit)
1036        return true;
1037    }
1038  }
1039
1040  return false;
1041}
1042
1043/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
1044/// current block and update their register pressures to reflect the effect
1045/// of hoisting MI from the current block to the preheader.
1046void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
1047  if (MI->isImplicitDef())
1048    return;
1049
1050  // First compute the 'cost' of the instruction, i.e. its contribution
1051  // to register pressure.
1052  DenseMap<unsigned, int> Cost;
1053  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
1054    const MachineOperand &MO = MI->getOperand(i);
1055    if (!MO.isReg() || MO.isImplicit())
1056      continue;
1057    unsigned Reg = MO.getReg();
1058    if (!TargetRegisterInfo::isVirtualRegister(Reg))
1059      continue;
1060
1061    unsigned RCId, RCCost;
1062    getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
1063    if (MO.isDef()) {
1064      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1065      if (CI != Cost.end())
1066        CI->second += RCCost;
1067      else
1068        Cost.insert(std::make_pair(RCId, RCCost));
1069    } else if (isOperandKill(MO, MRI)) {
1070      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1071      if (CI != Cost.end())
1072        CI->second -= RCCost;
1073      else
1074        Cost.insert(std::make_pair(RCId, -RCCost));
1075    }
1076  }
1077
1078  // Update register pressure of blocks from loop header to current block.
1079  for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
1080    SmallVector<unsigned, 8> &RP = BackTrace[i];
1081    for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1082         CI != CE; ++CI) {
1083      unsigned RCId = CI->first;
1084      RP[RCId] += CI->second;
1085    }
1086  }
1087}
1088
1089/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
1090/// the given loop invariant.
1091bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
1092  if (MI.isImplicitDef())
1093    return true;
1094
1095  // If the instruction is cheap, only hoist if it is rematerializable. Hoisting
1096  // will increase register pressure, which is probably not worth it if the
1097  // instruction is cheap.
1098  // Also hoist loads from constant memory, e.g. loads from stubs or the GOT.
1099  // Hoisting these tends to help performance in low register pressure
1100  // situations; the trade-off is that it may cause spills in high pressure
1101  // situations by adding a store in the loop preheader, but the reload is no
1102  // more expensive. The side benefit is these loads are frequently CSE'ed.
1103  if (IsCheapInstruction(MI)) {
1104    if (!TII->isTriviallyReMaterializable(&MI, AA))
1105      return false;
1106  } else {
1107    // Estimate register pressure to determine whether to LICM the instruction.
1108    // In low register pressure situations, we can be more aggressive about
1109    // hoisting. Also, favor hoisting long-latency instructions even in
1110    // moderately high pressure situations.
1111    // FIXME: If there are long latency loop-invariant instructions inside the
1112    // loop at this point, why didn't the optimizer's LICM hoist them?
1113    DenseMap<unsigned, int> Cost;
1114    for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
1115      const MachineOperand &MO = MI.getOperand(i);
1116      if (!MO.isReg() || MO.isImplicit())
1117        continue;
1118      unsigned Reg = MO.getReg();
1119      if (!TargetRegisterInfo::isVirtualRegister(Reg))
1120        continue;
1121
1122      unsigned RCId, RCCost;
1123      getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
1124      if (MO.isDef()) {
1125        if (HasHighOperandLatency(MI, i, Reg)) {
1126          ++NumHighLatency;
1127          return true;
1128        }
1129
1130        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1131        if (CI != Cost.end())
1132          CI->second += RCCost;
1133        else
1134          Cost.insert(std::make_pair(RCId, RCCost));
1135      } else if (isOperandKill(MO, MRI)) {
1136        // If a virtual register use is a kill, hoisting it out of the loop
1137        // may actually reduce register pressure or be register pressure
1138        // neutral.
1139        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1140        if (CI != Cost.end())
1141          CI->second -= RCCost;
1142        else
1143          Cost.insert(std::make_pair(RCId, -RCCost));
1144      }
1145    }
1146
1147    // Visit BBs from header to current BB; if hoisting this doesn't cause
1148    // high register pressure, then it's safe to proceed.
1149    if (!CanCauseHighRegPressure(Cost)) {
1150      ++NumLowRP;
1151      return true;
1152    }
1153
1154    // Do not "speculate" in high register pressure situations. If an
1155    // instruction is not guaranteed to be executed in the loop, it's best to be
1156    // conservative.
1157    if (AvoidSpeculation &&
1158        (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
1159      return false;
1160
1161    // In a high register pressure situation, only hoist if the instruction is
1162    // going to be remat'ed.
1163    if (!TII->isTriviallyReMaterializable(&MI, AA) &&
1164        !MI.isInvariantLoad(AA))
1165      return false;
1166  }
1167
1168  // If the result(s) of this instruction are used by PHIs outside of the loop,
1169  // then don't hoist the instruction because it will introduce an extra copy.
1170  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1171    const MachineOperand &MO = MI.getOperand(i);
1172    if (!MO.isReg() || !MO.isDef())
1173      continue;
1174    if (HasAnyPHIUse(MO.getReg()))
1175      return false;
1176  }
1177
1178  return true;
1179}
1180
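/// ExtractHoistableLoad - Unfold a load from the given machine instruction if
/// the load itself could be hoisted. Return the unfolded and hoistable load,
/// or null if the load could not be unfolded or would not be hoistable.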
1181MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
1182  // Don't unfold simple loads.
1183  if (MI->canFoldAsLoad())
1184    return 0;
1185
1186  // If not, we may be able to unfold a load and hoist that.
1187  // First test whether the instruction is loading from an amenable
1188  // memory location.
1189  if (!MI->isInvariantLoad(AA))
1190    return 0;
1191
1192  // Next determine the register class for a temporary register.
1193  unsigned LoadRegIndex;
1194  unsigned NewOpc =
1195    TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
1196                                    /*UnfoldLoad=*/true,
1197                                    /*UnfoldStore=*/false,
1198                                    &LoadRegIndex);
1199  if (NewOpc == 0) return 0;
1200  const MCInstrDesc &MID = TII->get(NewOpc);
1201  if (MID.getNumDefs() != 1) return 0;
1202  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
1203  // Ok, we're unfolding. Create a temporary register and do the unfold.
1204  unsigned Reg = MRI->createVirtualRegister(RC);
1205
1206  MachineFunction &MF = *MI->getParent()->getParent();
1207  SmallVector<MachineInstr *, 2> NewMIs;
1208  bool Success =
1209    TII->unfoldMemoryOperand(MF, MI, Reg,
1210                             /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
1211                             NewMIs);
1212  (void)Success;
1213  assert(Success &&
1214         "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
1215         "succeeded!");
1216  assert(NewMIs.size() == 2 &&
1217         "Unfolded a load into multiple instructions!");
1218  MachineBasicBlock *MBB = MI->getParent();
1219  MachineBasicBlock::iterator Pos = MI;
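  // NewMIs[0] is the unfolded load and NewMIs[1] is the remaining operation
  // that consumes its result; insert both in place of the original instruction.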
1220  MBB->insert(Pos, NewMIs[0]);
1221  MBB->insert(Pos, NewMIs[1]);
1222  // If unfolding produced a load that wasn't loop-invariant or profitable to
1223  // hoist, discard the new instructions and bail.
1224  if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
1225    NewMIs[0]->eraseFromParent();
1226    NewMIs[1]->eraseFromParent();
1227    return 0;
1228  }
1229
1230  // Update register pressure for the unfolded instruction.
1231  UpdateRegPressure(NewMIs[1]);
1232
1233  // Otherwise we successfully unfolded a load that we can hoist.
1234  MI->eraseFromParent();
1235  return NewMIs[0];
1236}
1237
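/// InitCSEMap - Initialize the CSE map with instructions that are in the
/// current loop preheader and may become duplicates of instructions that are
/// hoisted out of the loop.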
1238void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
1239  for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
1240    const MachineInstr *MI = &*I;
1241    unsigned Opcode = MI->getOpcode();
1242    DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1243      CI = CSEMap.find(Opcode);
1244    if (CI != CSEMap.end())
1245      CI->second.push_back(MI);
1246    else {
1247      std::vector<const MachineInstr*> CSEMIs;
1248      CSEMIs.push_back(MI);
1249      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1250    }
1251  }
1252}
1253
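/// LookForDuplicate - Find an instruction among PrevMIs that is a duplicate of
/// MI. Return this instruction if it is found.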
1254const MachineInstr*
1255MachineLICM::LookForDuplicate(const MachineInstr *MI,
1256                              std::vector<const MachineInstr*> &PrevMIs) {
1257  for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
1258    const MachineInstr *PrevMI = PrevMIs[i];
1259    if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
1260      return PrevMI;
1261  }
1262  return 0;
1263}
1264
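/// EliminateCSE - Given a LICM'ed instruction, look for an instruction in the
/// preheader that computes the same value. If one is found, replace all uses
/// of MI's definition with the existing instruction's definition instead of
/// hoisting MI to the preheader.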
1265bool MachineLICM::EliminateCSE(MachineInstr *MI,
1266          DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
1267  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1268  // the undef property onto uses.
1269  if (CI == CSEMap.end() || MI->isImplicitDef())
1270    return false;
1271
1272  if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
1273    DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
1274
1275    // Replace virtual registers defined by MI by their counterparts defined
1276    // by Dup.
1277    SmallVector<unsigned, 2> Defs;
1278    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1279      const MachineOperand &MO = MI->getOperand(i);
1280
1281      // Physical registers may not differ here.
1282      assert((!MO.isReg() || MO.getReg() == 0 ||
1283              !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
1284              MO.getReg() == Dup->getOperand(i).getReg()) &&
1285             "Instructions with different phys regs are not identical!");
1286
1287      if (MO.isReg() && MO.isDef() &&
1288          !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
1289        Defs.push_back(i);
1290    }
1291
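    // Dup's defined registers must be constrained to register classes that are
    // compatible with the registers they will replace; if that is not
    // possible, CSE cannot be done.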
1292    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
1293    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1294      unsigned Idx = Defs[i];
1295      unsigned Reg = MI->getOperand(Idx).getReg();
1296      unsigned DupReg = Dup->getOperand(Idx).getReg();
1297      OrigRCs.push_back(MRI->getRegClass(DupReg));
1298
1299      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
1300        // Restore the old register classes if there was more than one def.
1301        for (unsigned j = 0; j != i; ++j)
1302          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
1303        return false;
1304      }
1305    }
1306
1307    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1308      unsigned Idx = Defs[i];
1309      unsigned Reg = MI->getOperand(Idx).getReg();
1310      unsigned DupReg = Dup->getOperand(Idx).getReg();
1311      MRI->replaceRegWith(Reg, DupReg);
1312      MRI->clearKillFlags(DupReg);
1313    }
1314
1315    MI->eraseFromParent();
1316    ++NumCSEed;
1317    return true;
1318  }
1319  return false;
1320}
1321
1322/// MayCSE - Return true if the given instruction will be CSE'd if it's
1323/// hoisted out of the loop.
1324bool MachineLICM::MayCSE(MachineInstr *MI) {
1325  unsigned Opcode = MI->getOpcode();
1326  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1327    CI = CSEMap.find(Opcode);
1328  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1329  // the undef property onto uses.
1330  if (CI == CSEMap.end() || MI->isImplicitDef())
1331    return false;
1332
1333  return LookForDuplicate(MI, CI->second) != 0;
1334}
1335
1336/// Hoist - When an instruction is found to use only loop invariant operands
1337/// that are safe to hoist, this function is called to do the dirty work.
1338///
1339bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
1340  // First check whether we should hoist this instruction.
1341  if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
1342    // If not, try unfolding a hoistable load.
1343    MI = ExtractHoistableLoad(MI);
1344    if (!MI) return false;
1345  }
1346
1347  // Now move the instruction to the preheader, inserting it before any
1348  // terminator instructions.
1349  DEBUG({
1350      dbgs() << "Hoisting " << *MI;
1351      if (Preheader->getBasicBlock())
1352        dbgs() << " to MachineBasicBlock "
1353               << Preheader->getName();
1354      if (MI->getParent()->getBasicBlock())
1355        dbgs() << " from MachineBasicBlock "
1356               << MI->getParent()->getName();
1357      dbgs() << "\n";
1358    });
1359
1360  // If this is the first instruction being hoisted to the preheader,
1361  // initialize the CSE map with potential common expressions.
1362  if (FirstInLoop) {
1363    InitCSEMap(Preheader);
1364    FirstInLoop = false;
1365  }
1366
1367  // Look for opportunity to CSE the hoisted instruction.
1368  unsigned Opcode = MI->getOpcode();
1369  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1370    CI = CSEMap.find(Opcode);
1371  if (!EliminateCSE(MI, CI)) {
1372    // Otherwise, splice the instruction to the preheader.
1373    Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
1374
1375    // Update register pressure for BBs from header to this block.
1376    UpdateBackTraceRegPressure(MI);
1377
1378    // Clear the kill flags of any register this instruction defines,
1379    // since they may need to be live throughout the entire loop
1380    // rather than just live for part of it.
1381    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1382      MachineOperand &MO = MI->getOperand(i);
1383      if (MO.isReg() && MO.isDef() && !MO.isDead())
1384        MRI->clearKillFlags(MO.getReg());
1385    }
1386
1387    // Add to the CSE map.
1388    if (CI != CSEMap.end())
1389      CI->second.push_back(MI);
1390    else {
1391      std::vector<const MachineInstr*> CSEMIs;
1392      CSEMIs.push_back(MI);
1393      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1394    }
1395  }
1396
1397  ++NumHoisted;
1398  Changed = true;
1399
1400  return true;
1401}
1402
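/// getCurPreheader - Get the preheader for the current loop, splitting a
/// critical edge if needed.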
1403MachineBasicBlock *MachineLICM::getCurPreheader() {
1404  // Determine the block to which to hoist instructions. If we can't find a
1405  // suitable loop predecessor, we can't do any hoisting.
1406
1407  // If we've tried to get a preheader and failed, don't try again.
1408  if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
1409    return 0;
1410
1411  if (!CurPreheader) {
1412    CurPreheader = CurLoop->getLoopPreheader();
1413    if (!CurPreheader) {
1414      MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
1415      if (!Pred) {
1416        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1417        return 0;
1418      }
1419
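      // There is no preheader, but the predecessor's critical edge into the
      // header can be split to create one.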
1420      CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
1421      if (!CurPreheader) {
1422        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1423        return 0;
1424      }
1425    }
1426  }
1427  return CurPreheader;
1428}
1429