MachineLICM.cpp revision 9f17cf625dc8be1e92cd2755e2d118ce38fc7268
1//===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass performs loop invariant code motion on machine instructions. We
11// attempt to remove as much code from the body of a loop as possible.
12//
13// This pass does not attempt to throttle itself to limit register pressure.
14// The register allocation phases are expected to perform rematerialization
15// to recover when register pressure is high.
16//
17// This pass is not intended to be a replacement or a complete alternative
18// for the LLVM-IR-level LICM pass. It is only designed to hoist simple
19// constructs that are not exposed before lowering and instruction selection.
20//
21//===----------------------------------------------------------------------===//
22
23#define DEBUG_TYPE "machine-licm"
24#include "llvm/CodeGen/Passes.h"
25#include "llvm/CodeGen/MachineDominators.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/PseudoSourceValue.h"
31#include "llvm/MC/MCInstrItineraries.h"
32#include "llvm/Target/TargetLowering.h"
33#include "llvm/Target/TargetRegisterInfo.h"
34#include "llvm/Target/TargetInstrInfo.h"
35#include "llvm/Target/TargetMachine.h"
36#include "llvm/Analysis/AliasAnalysis.h"
37#include "llvm/ADT/DenseMap.h"
38#include "llvm/ADT/SmallSet.h"
39#include "llvm/ADT/Statistic.h"
40#include "llvm/Support/CommandLine.h"
41#include "llvm/Support/Debug.h"
42#include "llvm/Support/raw_ostream.h"
43using namespace llvm;
44
45static cl::opt<bool>
46AvoidSpeculation("avoid-speculation",
47                 cl::desc("MachineLICM should avoid speculation"),
48                 cl::init(true), cl::Hidden);
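// Note: being cl::Hidden, the option above is intended for experiments; it
// can still be set explicitly (sketch: "llc -avoid-speculation=false") to let
// the pass hoist instructions that are not guaranteed to execute even under
// high register pressure.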
49
50STATISTIC(NumHoisted,
51          "Number of machine instructions hoisted out of loops");
52STATISTIC(NumLowRP,
53          "Number of instructions hoisted in low reg pressure situation");
54STATISTIC(NumHighLatency,
55          "Number of high latency instructions hoisted");
56STATISTIC(NumCSEed,
57          "Number of hoisted machine instructions CSEed");
58STATISTIC(NumPostRAHoisted,
59          "Number of machine instructions hoisted out of loops post regalloc");
60
61namespace {
62  class MachineLICM : public MachineFunctionPass {
63    bool PreRegAlloc;
64
65    const TargetMachine   *TM;
66    const TargetInstrInfo *TII;
67    const TargetLowering *TLI;
68    const TargetRegisterInfo *TRI;
69    const MachineFrameInfo *MFI;
70    MachineRegisterInfo *MRI;
71    const InstrItineraryData *InstrItins;
72
73    // Various analyses that we use...
74    AliasAnalysis        *AA;      // Alias analysis info.
75    MachineLoopInfo      *MLI;     // Current MachineLoopInfo
76    MachineDominatorTree *DT;      // Machine dominator tree for the cur loop
77
78    // State that is updated as we process loops
79    bool         Changed;          // True if a loop is changed.
80    bool         FirstInLoop;      // True if it's the first LICM in the loop.
81    MachineLoop *CurLoop;          // The current loop we are working on.
82    MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
83
84    // Track 'estimated' register pressure.
85    SmallSet<unsigned, 32> RegSeen;
86    SmallVector<unsigned, 8> RegPressure;
87
88    // Register pressure "limit" per register class. If the pressure
89    // is higher than the limit, then it's considered high.
90    SmallVector<unsigned, 8> RegLimit;
91
92    // Register pressure on path leading from loop preheader to current BB.
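    // Each entry is a per-register-class pressure snapshot; EnterScope pushes
    // one as the dominator tree is descended and ExitScope pops it.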
93    SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
94
95    // For each opcode, keep a list of potential CSE instructions.
96    DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
97
98    enum {
99      SpeculateFalse   = 0,
100      SpeculateTrue    = 1,
101      SpeculateUnknown = 2
102    };
103
    // If an MBB does not dominate all of the loop's exiting blocks, then it
    // may not be safe to hoist loads from that block.
    // Tri-state: 0 - false, 1 - true, 2 - unknown
107    unsigned SpeculationState;
108
109  public:
110    static char ID; // Pass identification, replacement for typeid
111    MachineLICM() :
112      MachineFunctionPass(ID), PreRegAlloc(true) {
113        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
114      }
115
116    explicit MachineLICM(bool PreRA) :
117      MachineFunctionPass(ID), PreRegAlloc(PreRA) {
118        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
119      }
120
121    virtual bool runOnMachineFunction(MachineFunction &MF);
122
123    const char *getPassName() const { return "Machine Instruction LICM"; }
124
125    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
126      AU.addRequired<MachineLoopInfo>();
127      AU.addRequired<MachineDominatorTree>();
128      AU.addRequired<AliasAnalysis>();
129      AU.addPreserved<MachineLoopInfo>();
130      AU.addPreserved<MachineDominatorTree>();
131      MachineFunctionPass::getAnalysisUsage(AU);
132    }
133
134    virtual void releaseMemory() {
135      RegSeen.clear();
136      RegPressure.clear();
137      RegLimit.clear();
138      BackTrace.clear();
139      for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
140             CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
141        CI->second.clear();
142      CSEMap.clear();
143    }
144
145  private:
146    /// CandidateInfo - Keep track of information about hoisting candidates.
147    struct CandidateInfo {
148      MachineInstr *MI;
149      unsigned      Def;
150      int           FI;
151      CandidateInfo(MachineInstr *mi, unsigned def, int fi)
152        : MI(mi), Def(def), FI(fi) {}
153    };
154
155    /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
156    /// invariants out to the preheader.
157    void HoistRegionPostRA();
158
    /// HoistPostRA - When an instruction is found to use only loop-invariant
    /// operands and is safe to hoist, this function is called to do the
    /// dirty work.
162    void HoistPostRA(MachineInstr *MI, unsigned Def);
163
    /// ProcessMI - Examine the instruction for a potential LICM candidate.
    /// Also gather register def and frame object update information.
166    void ProcessMI(MachineInstr *MI,
167                   BitVector &PhysRegDefs,
168                   BitVector &PhysRegClobbers,
169                   SmallSet<int, 32> &StoredFIs,
170                   SmallVector<CandidateInfo, 32> &Candidates);
171
172    /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
173    /// current loop.
174    void AddToLiveIns(unsigned Reg);
175
176    /// IsLICMCandidate - Returns true if the instruction may be a suitable
177    /// candidate for LICM. e.g. If the instruction is a call, then it's
178    /// obviously not safe to hoist it.
179    bool IsLICMCandidate(MachineInstr &I);
180
181    /// IsLoopInvariantInst - Returns true if the instruction is loop
182    /// invariant. I.e., all virtual register operands are defined outside of
183    /// the loop, physical registers aren't accessed (explicitly or implicitly),
184    /// and the instruction is hoistable.
185    ///
186    bool IsLoopInvariantInst(MachineInstr &I);
187
188    /// HasAnyPHIUse - Return true if the specified register is used by any
189    /// phi node.
190    bool HasAnyPHIUse(unsigned Reg) const;
191
    /// HasHighOperandLatency - Compute the operand latency between a def of
    /// 'Reg' and a use in the current loop; return true if the target
    /// considers it 'high'.
195    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
196                               unsigned Reg) const;
197
198    bool IsCheapInstruction(MachineInstr &MI) const;
199
    /// CanCauseHighRegPressure - Visit BBs from header to current BB and
    /// check if hoisting an instruction with the given register-class cost
    /// map can cause high register pressure.
203    bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
204
205    /// UpdateBackTraceRegPressure - Traverse the back trace from header to
206    /// the current block and update their register pressures to reflect the
207    /// effect of hoisting MI from the current block to the preheader.
208    void UpdateBackTraceRegPressure(const MachineInstr *MI);
209
210    /// IsProfitableToHoist - Return true if it is potentially profitable to
211    /// hoist the given loop invariant.
212    bool IsProfitableToHoist(MachineInstr &MI);
213
    /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
    /// If not, then a load from this mbb may not be safe to hoist.
216    bool IsGuaranteedToExecute(MachineBasicBlock *BB);
217
218    void EnterScope(MachineBasicBlock *MBB);
219
220    void ExitScope(MachineBasicBlock *MBB);
221
    /// ExitScopeIfDone - Destroy the scope for the MBB that corresponds to
    /// the given dominator tree node if it's a leaf or all of its children
    /// are done. Walk up the dominator tree to destroy ancestors which are
    /// now done.
225    void ExitScopeIfDone(MachineDomTreeNode *Node,
226                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
227                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
228
229    /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
230    /// blocks dominated by the specified header block, and that are in the
231    /// current loop) in depth first order w.r.t the DominatorTree. This allows
232    /// us to visit definitions before uses, allowing us to hoist a loop body in
233    /// one pass without iteration.
234    ///
235    void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
236    void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
237
238    /// getRegisterClassIDAndCost - For a given MI, register, and the operand
239    /// index, return the ID and cost of its representative register class by
240    /// reference.
241    void getRegisterClassIDAndCost(const MachineInstr *MI,
242                                   unsigned Reg, unsigned OpIdx,
243                                   unsigned &RCId, unsigned &RCCost) const;
244
245    /// InitRegPressure - Find all virtual register references that are liveout
246    /// of the preheader to initialize the starting "register pressure". Note
247    /// this does not count live through (livein but not used) registers.
248    void InitRegPressure(MachineBasicBlock *BB);
249
250    /// UpdateRegPressure - Update estimate of register pressure after the
251    /// specified instruction.
252    void UpdateRegPressure(const MachineInstr *MI);
253
254    /// ExtractHoistableLoad - Unfold a load from the given machineinstr if
255    /// the load itself could be hoisted. Return the unfolded and hoistable
256    /// load, or null if the load couldn't be unfolded or if it wouldn't
257    /// be hoistable.
258    MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
259
    /// LookForDuplicate - Find an instruction among PrevMIs that is a
    /// duplicate of MI. Return this instruction if it's found.
262    const MachineInstr *LookForDuplicate(const MachineInstr *MI,
263                                     std::vector<const MachineInstr*> &PrevMIs);
264
    /// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
    /// the preheader that computes the same value. If one is found, replace
    /// all uses of the instruction's definition with the definition of the
    /// existing instruction rather than hoisting the instruction to the
    /// preheader.
269    bool EliminateCSE(MachineInstr *MI,
270           DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
271
272    /// MayCSE - Return true if the given instruction will be CSE'd if it's
273    /// hoisted out of the loop.
274    bool MayCSE(MachineInstr *MI);
275
    /// Hoist - When an instruction is found to use only loop-invariant
    /// operands and is safe to hoist, this function is called to do the dirty
    /// work. It returns true if the instruction is hoisted.
279    bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
280
281    /// InitCSEMap - Initialize the CSE map with instructions that are in the
282    /// current loop preheader that may become duplicates of instructions that
283    /// are hoisted out of the loop.
284    void InitCSEMap(MachineBasicBlock *BB);
285
286    /// getCurPreheader - Get the preheader for the current loop, splitting
287    /// a critical edge if needed.
288    MachineBasicBlock *getCurPreheader();
289  };
290} // end anonymous namespace
291
292char MachineLICM::ID = 0;
293INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
294                "Machine Loop Invariant Code Motion", false, false)
295INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
296INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
297INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
298INITIALIZE_PASS_END(MachineLICM, "machinelicm",
299                "Machine Loop Invariant Code Motion", false, false)
300
301FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
302  return new MachineLICM(PreRegAlloc);
303}
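
// A minimal sketch (not from this file) of how a target pass pipeline might
// schedule both variants of this pass around register allocation:
//
//   PM.add(createMachineLICMPass(/*PreRegAlloc=*/true));
//   ... register allocation passes ...
//   PM.add(createMachineLICMPass(/*PreRegAlloc=*/false));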
304
305/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
306/// loop that has a unique predecessor.
307static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
308  // Check whether this loop even has a unique predecessor.
309  if (!CurLoop->getLoopPredecessor())
310    return false;
311  // Ok, now check to see if any of its outer loops do.
312  for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
313    if (L->getLoopPredecessor())
314      return false;
315  // None of them did, so this is the outermost with a unique predecessor.
316  return true;
317}
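
// Pre-regalloc MachineLICM only starts from such outer-most loops: hoisting
// then moves invariants from the whole loop nest to the outer preheader in a
// single walk. If the outer-most loop has no unique predecessor,
// runOnMachineFunction descends into its child loops instead.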
318
319bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
320  if (PreRegAlloc)
321    DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
322  else
323    DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
324  DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
325
326  Changed = FirstInLoop = false;
327  TM = &MF.getTarget();
328  TII = TM->getInstrInfo();
329  TLI = TM->getTargetLowering();
330  TRI = TM->getRegisterInfo();
331  MFI = MF.getFrameInfo();
332  MRI = &MF.getRegInfo();
333  InstrItins = TM->getInstrItineraryData();
334
335  if (PreRegAlloc) {
336    // Estimate register pressure during pre-regalloc pass.
337    unsigned NumRC = TRI->getNumRegClasses();
338    RegPressure.resize(NumRC);
339    std::fill(RegPressure.begin(), RegPressure.end(), 0);
340    RegLimit.resize(NumRC);
341    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
342           E = TRI->regclass_end(); I != E; ++I)
343      RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
344  }
345
346  // Get our Loop information...
347  MLI = &getAnalysis<MachineLoopInfo>();
348  DT  = &getAnalysis<MachineDominatorTree>();
349  AA  = &getAnalysis<AliasAnalysis>();
350
351  SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
352  while (!Worklist.empty()) {
353    CurLoop = Worklist.pop_back_val();
354    CurPreheader = 0;
355
    // If this is done before regalloc, only visit outer-most loops that have
    // a unique predecessor.
358    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
359      Worklist.append(CurLoop->begin(), CurLoop->end());
360      continue;
361    }
362
363    if (!PreRegAlloc)
364      HoistRegionPostRA();
365    else {
366      // CSEMap is initialized for loop header when the first instruction is
367      // being hoisted.
368      MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
369      FirstInLoop = true;
370      HoistOutOfLoop(N);
371      CSEMap.clear();
372    }
373  }
374
375  return Changed;
376}
377
378/// InstructionStoresToFI - Return true if instruction stores to the
379/// specified frame.
380static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
381  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
382         oe = MI->memoperands_end(); o != oe; ++o) {
383    if (!(*o)->isStore() || !(*o)->getValue())
384      continue;
385    if (const FixedStackPseudoSourceValue *Value =
386        dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
387      if (Value->getFrameIndex() == FI)
388        return true;
389    }
390  }
391  return false;
392}
393
/// ProcessMI - Examine the instruction for a potential LICM candidate. Also
/// gather register def and frame object update information.
396void MachineLICM::ProcessMI(MachineInstr *MI,
397                            BitVector &PhysRegDefs,
398                            BitVector &PhysRegClobbers,
399                            SmallSet<int, 32> &StoredFIs,
400                            SmallVector<CandidateInfo, 32> &Candidates) {
401  bool RuledOut = false;
402  bool HasNonInvariantUse = false;
403  unsigned Def = 0;
404  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
405    const MachineOperand &MO = MI->getOperand(i);
406    if (MO.isFI()) {
407      // Remember if the instruction stores to the frame index.
408      int FI = MO.getIndex();
409      if (!StoredFIs.count(FI) &&
410          MFI->isSpillSlotObjectIndex(FI) &&
411          InstructionStoresToFI(MI, FI))
412        StoredFIs.insert(FI);
413      HasNonInvariantUse = true;
414      continue;
415    }
416
417    // We can't hoist an instruction defining a physreg that is clobbered in
418    // the loop.
419    if (MO.isRegMask()) {
420      PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
421      continue;
422    }
423
424    if (!MO.isReg())
425      continue;
426    unsigned Reg = MO.getReg();
427    if (!Reg)
428      continue;
429    assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
430           "Not expecting virtual register!");
431
432    if (!MO.isDef()) {
433      if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
434        // If it's using a non-loop-invariant register, then it's obviously not
435        // safe to hoist.
436        HasNonInvariantUse = true;
437      continue;
438    }
439
440    if (MO.isImplicit()) {
441      for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS)
442        PhysRegClobbers.set(*AS);
443      if (!MO.isDead())
444        // Non-dead implicit def? This cannot be hoisted.
445        RuledOut = true;
446      // No need to check if a dead implicit def is also defined by
447      // another instruction.
448      continue;
449    }
450
451    // FIXME: For now, avoid instructions with multiple defs, unless
452    // it's a dead implicit def.
453    if (Def)
454      RuledOut = true;
455    else
456      Def = Reg;
457
    // If we have already seen another instruction that defines the same
    // register, then this is not safe.  Two defs are indicated by setting a
    // PhysRegClobbers bit.
461    for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS) {
462      if (PhysRegDefs.test(*AS))
463        PhysRegClobbers.set(*AS);
464      if (PhysRegClobbers.test(*AS))
465        // MI defined register is seen defined by another instruction in
466        // the loop, it cannot be a LICM candidate.
467        RuledOut = true;
468      PhysRegDefs.set(*AS);
469    }
470  }
471
  // Only consider reloads for now and remats which do not have register
  // operands. FIXME: Consider unfolding load-folding instructions.
474  if (Def && !RuledOut) {
475    int FI = INT_MIN;
476    if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
477        (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
478      Candidates.push_back(CandidateInfo(MI, Def, FI));
479  }
480}
481
482/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
483/// invariants out to the preheader.
484void MachineLICM::HoistRegionPostRA() {
485  unsigned NumRegs = TRI->getNumRegs();
486  BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
487  BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
488
489  SmallVector<CandidateInfo, 32> Candidates;
490  SmallSet<int, 32> StoredFIs;
491
492  // Walk the entire region, count number of defs for each register, and
493  // collect potential LICM candidates.
494  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
495  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
496    MachineBasicBlock *BB = Blocks[i];
497
498    // If the header of the loop containing this basic block is a landing pad,
499    // then don't try to hoist instructions out of this loop.
500    const MachineLoop *ML = MLI->getLoopFor(BB);
501    if (ML && ML->getHeader()->isLandingPad()) continue;
502
    // Conservatively treat live-ins as external defs.
    // FIXME: That means a reload that is reused in successor block(s) will
    // not be LICM'ed.
506    for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
507           E = BB->livein_end(); I != E; ++I) {
508      unsigned Reg = *I;
509      for (const unsigned *AS = TRI->getOverlaps(Reg); *AS; ++AS)
510        PhysRegDefs.set(*AS);
511    }
512
513    SpeculationState = SpeculateUnknown;
514    for (MachineBasicBlock::iterator
515           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
516      MachineInstr *MI = &*MII;
517      ProcessMI(MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
518    }
519  }
520
521  // Now evaluate whether the potential candidates qualify.
522  // 1. Check if the candidate defined register is defined by another
523  //    instruction in the loop.
524  // 2. If the candidate is a load from stack slot (always true for now),
525  //    check if the slot is stored anywhere in the loop.
526  for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
527    if (Candidates[i].FI != INT_MIN &&
528        StoredFIs.count(Candidates[i].FI))
529      continue;
530
531    if (!PhysRegClobbers.test(Candidates[i].Def)) {
532      bool Safe = true;
533      MachineInstr *MI = Candidates[i].MI;
534      for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
535        const MachineOperand &MO = MI->getOperand(j);
536        if (!MO.isReg() || MO.isDef() || !MO.getReg())
537          continue;
538        if (PhysRegDefs.test(MO.getReg()) ||
539            PhysRegClobbers.test(MO.getReg())) {
540          // If it's using a non-loop-invariant register, then it's obviously
541          // not safe to hoist.
542          Safe = false;
543          break;
544        }
545      }
546      if (Safe)
547        HoistPostRA(MI, Candidates[i].Def);
548    }
549  }
550}
551
552/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
553/// loop, and make sure it is not killed by any instructions in the loop.
554void MachineLICM::AddToLiveIns(unsigned Reg) {
555  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
556  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
557    MachineBasicBlock *BB = Blocks[i];
558    if (!BB->isLiveIn(Reg))
559      BB->addLiveIn(Reg);
560    for (MachineBasicBlock::iterator
561           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
562      MachineInstr *MI = &*MII;
563      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
564        MachineOperand &MO = MI->getOperand(i);
565        if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
566        if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
567          MO.setIsKill(false);
568      }
569    }
570  }
571}
572
/// HoistPostRA - When an instruction is found to use only loop-invariant
/// operands and is safe to hoist, this function is called to do the
/// dirty work.
576void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
577  MachineBasicBlock *Preheader = getCurPreheader();
578  if (!Preheader) return;
579
  // Now move the instruction to the preheader, inserting it before any
  // terminator instructions.
582  DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
583               << MI->getParent()->getNumber() << ": " << *MI);
584
585  // Splice the instruction to the preheader.
586  MachineBasicBlock *MBB = MI->getParent();
587  Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
588
589  // Add register to livein list to all the BBs in the current loop since a
590  // loop invariant must be kept live throughout the whole loop. This is
591  // important to ensure later passes do not scavenge the def register.
592  AddToLiveIns(Def);
593
594  ++NumPostRAHoisted;
595  Changed = true;
596}
597
// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
// If not, then a load from this mbb may not be safe to hoist.
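// Note that SpeculateFalse means hoisting from the block is not speculative
// (the block is guaranteed to execute), while SpeculateTrue means it would
// be; the cached state is only valid for the current block and is reset to
// SpeculateUnknown by callers when they move to a new block.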
600bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
601  if (SpeculationState != SpeculateUnknown)
602    return SpeculationState == SpeculateFalse;
603
604  if (BB != CurLoop->getHeader()) {
605    // Check loop exiting blocks.
606    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
607    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
608    for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
609      if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
610        SpeculationState = SpeculateTrue;
611        return false;
612      }
613  }
614
615  SpeculationState = SpeculateFalse;
616  return true;
617}
618
619void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
620  DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
621
622  // Remember livein register pressure.
623  BackTrace.push_back(RegPressure);
624}
625
626void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
627  DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
628  BackTrace.pop_back();
629}
630
/// ExitScopeIfDone - Destroy the scope for the MBB that corresponds to the
/// given dominator tree node if it's a leaf or all of its children are done.
/// Walk up the dominator tree to destroy ancestors which are now done.
634void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
635                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
636                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
637  if (OpenChildren[Node])
638    return;
639
640  // Pop scope.
641  ExitScope(Node->getBlock());
642
  // Now traverse upwards to pop ancestors whose children are all done.
644  while (MachineDomTreeNode *Parent = ParentMap[Node]) {
645    unsigned Left = --OpenChildren[Parent];
646    if (Left != 0)
647      break;
648    ExitScope(Parent->getBlock());
649    Node = Parent;
650  }
651}
652
653/// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
654/// blocks dominated by the specified header block, and that are in the
655/// current loop) in depth first order w.r.t the DominatorTree. This allows
656/// us to visit definitions before uses, allowing us to hoist a loop body in
657/// one pass without iteration.
658///
659void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
660  SmallVector<MachineDomTreeNode*, 32> Scopes;
661  SmallVector<MachineDomTreeNode*, 8> WorkList;
662  DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
663  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
664
665  // Perform a DFS walk to determine the order of visit.
666  WorkList.push_back(HeaderN);
667  do {
668    MachineDomTreeNode *Node = WorkList.pop_back_val();
669    assert(Node != 0 && "Null dominator tree node?");
670    MachineBasicBlock *BB = Node->getBlock();
671
672    // If the header of the loop containing this basic block is a landing pad,
673    // then don't try to hoist instructions out of this loop.
674    const MachineLoop *ML = MLI->getLoopFor(BB);
675    if (ML && ML->getHeader()->isLandingPad())
676      continue;
677
678    // If this subregion is not in the top level loop at all, exit.
679    if (!CurLoop->contains(BB))
680      continue;
681
682    Scopes.push_back(Node);
683    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
684    unsigned NumChildren = Children.size();
685
686    // Don't hoist things out of a large switch statement.  This often causes
687    // code to be hoisted that wasn't going to be executed, and increases
688    // register pressure in a situation where it's likely to matter.
689    if (BB->succ_size() >= 25)
690      NumChildren = 0;
691
692    OpenChildren[Node] = NumChildren;
693    // Add children in reverse order as then the next popped worklist node is
694    // the first child of this node.  This means we ultimately traverse the
695    // DOM tree in exactly the same order as if we'd recursed.
696    for (int i = (int)NumChildren-1; i >= 0; --i) {
697      MachineDomTreeNode *Child = Children[i];
698      ParentMap[Child] = Node;
699      WorkList.push_back(Child);
700    }
701  } while (!WorkList.empty());
702
703  if (Scopes.size() != 0) {
704    MachineBasicBlock *Preheader = getCurPreheader();
705    if (!Preheader)
706      return;
707
708    // Compute registers which are livein into the loop headers.
709    RegSeen.clear();
710    BackTrace.clear();
711    InitRegPressure(Preheader);
712  }
713
714  // Now perform LICM.
715  for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
716    MachineDomTreeNode *Node = Scopes[i];
717    MachineBasicBlock *MBB = Node->getBlock();
718
719    MachineBasicBlock *Preheader = getCurPreheader();
720    if (!Preheader)
721      continue;
722
723    EnterScope(MBB);
724
725    // Process the block
726    SpeculationState = SpeculateUnknown;
727    for (MachineBasicBlock::iterator
728         MII = MBB->begin(), E = MBB->end(); MII != E; ) {
729      MachineBasicBlock::iterator NextMII = MII; ++NextMII;
730      MachineInstr *MI = &*MII;
731      if (!Hoist(MI, Preheader))
732        UpdateRegPressure(MI);
733      MII = NextMII;
734    }
735
736    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
737    ExitScopeIfDone(Node, OpenChildren, ParentMap);
738  }
739}
740
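/// isOperandKill - Return true if this use operand ends its register's live
/// range: either it is explicitly marked as a kill, or it is the register's
/// only non-debug use.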
741static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
742  return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
743}
744
745/// getRegisterClassIDAndCost - For a given MI, register, and the operand
746/// index, return the ID and cost of its representative register class.
747void
748MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
749                                       unsigned Reg, unsigned OpIdx,
750                                       unsigned &RCId, unsigned &RCCost) const {
751  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
752  EVT VT = *RC->vt_begin();
753  if (VT == MVT::Untyped) {
754    RCId = RC->getID();
755    RCCost = 1;
756  } else {
757    RCId = TLI->getRepRegClassFor(VT)->getID();
758    RCCost = TLI->getRepRegClassCostFor(VT);
759  }
760}
761
762/// InitRegPressure - Find all virtual register references that are liveout of
763/// the preheader to initialize the starting "register pressure". Note this
764/// does not count live through (livein but not used) registers.
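/// A use of a register not seen before that is not killed here must be a
/// live-in whose def lies outside the block, so it adds to the starting
/// pressure; a use of an already-seen register that is killed here ends its
/// live range and subtracts from it.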
765void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
766  std::fill(RegPressure.begin(), RegPressure.end(), 0);
767
768  // If the preheader has only a single predecessor and it ends with a
769  // fallthrough or an unconditional branch, then scan its predecessor for live
770  // defs as well. This happens whenever the preheader is created by splitting
771  // the critical edge from the loop predecessor to the loop header.
772  if (BB->pred_size() == 1) {
773    MachineBasicBlock *TBB = 0, *FBB = 0;
774    SmallVector<MachineOperand, 4> Cond;
775    if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
776      InitRegPressure(*BB->pred_begin());
777  }
778
779  for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
780       MII != E; ++MII) {
781    MachineInstr *MI = &*MII;
782    for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
783      const MachineOperand &MO = MI->getOperand(i);
784      if (!MO.isReg() || MO.isImplicit())
785        continue;
786      unsigned Reg = MO.getReg();
787      if (!TargetRegisterInfo::isVirtualRegister(Reg))
788        continue;
789
790      bool isNew = RegSeen.insert(Reg);
791      unsigned RCId, RCCost;
792      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
793      if (MO.isDef())
794        RegPressure[RCId] += RCCost;
795      else {
796        bool isKill = isOperandKill(MO, MRI);
797        if (isNew && !isKill)
798          // Haven't seen this, it must be a livein.
799          RegPressure[RCId] += RCCost;
800        else if (!isNew && isKill)
801          RegPressure[RCId] -= RCCost;
802      }
803    }
804  }
805}
806
807/// UpdateRegPressure - Update estimate of register pressure after the
808/// specified instruction.
809void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
810  if (MI->isImplicitDef())
811    return;
812
813  SmallVector<unsigned, 4> Defs;
814  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
815    const MachineOperand &MO = MI->getOperand(i);
816    if (!MO.isReg() || MO.isImplicit())
817      continue;
818    unsigned Reg = MO.getReg();
819    if (!TargetRegisterInfo::isVirtualRegister(Reg))
820      continue;
821
822    bool isNew = RegSeen.insert(Reg);
823    if (MO.isDef())
824      Defs.push_back(Reg);
825    else if (!isNew && isOperandKill(MO, MRI)) {
826      unsigned RCId, RCCost;
827      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
828      if (RCCost > RegPressure[RCId])
829        RegPressure[RCId] = 0;
830      else
831        RegPressure[RCId] -= RCCost;
832    }
833  }
834
835  unsigned Idx = 0;
836  while (!Defs.empty()) {
837    unsigned Reg = Defs.pop_back_val();
838    unsigned RCId, RCCost;
839    getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
840    RegPressure[RCId] += RCCost;
841    ++Idx;
842  }
843}
844
845/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
846/// loads from global offset table or constant pool.
847static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
848  assert (MI.mayLoad() && "Expected MI that loads!");
849  for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
850         E = MI.memoperands_end(); I != E; ++I) {
851    if (const Value *V = (*I)->getValue()) {
852      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
853        if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
854          return true;
855    }
856  }
857  return false;
858}
859
860/// IsLICMCandidate - Returns true if the instruction may be a suitable
861/// candidate for LICM. e.g. If the instruction is a call, then it's obviously
862/// not safe to hoist it.
863bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
864  // Check if it's safe to move the instruction.
865  bool DontMoveAcrossStore = true;
866  if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
867    return false;
868
  // If it is a load, check that it is guaranteed to execute by making sure
  // its parent block dominates all exiting blocks. If it doesn't, then there
  // is a path out of the loop which does not execute this load, so we can't
  // hoist it. Loads from constant memory are not always safe to speculate,
  // for example an indexed load from a jump table.
  // Stores and side effects are already checked by isSafeToMove.
875  if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
876      !IsGuaranteedToExecute(I.getParent()))
877    return false;
878
879  return true;
880}
881
882/// IsLoopInvariantInst - Returns true if the instruction is loop
883/// invariant. I.e., all virtual register operands are defined outside of the
884/// loop, physical registers aren't accessed explicitly, and there are no side
885/// effects that aren't captured by the operands or other flags.
886///
887bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
888  if (!IsLICMCandidate(I))
889    return false;
890
891  // The instruction is loop invariant if all of its operands are.
892  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
893    const MachineOperand &MO = I.getOperand(i);
894
895    if (!MO.isReg())
896      continue;
897
898    unsigned Reg = MO.getReg();
899    if (Reg == 0) continue;
900
901    // Don't hoist an instruction that uses or defines a physical register.
902    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
903      if (MO.isUse()) {
904        // If the physreg has no defs anywhere, it's just an ambient register
905        // and we can freely move its uses. Alternatively, if it's allocatable,
906        // it could get allocated to something with a def during allocation.
907        if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
908          return false;
909        // Otherwise it's safe to move.
910        continue;
911      } else if (!MO.isDead()) {
912        // A def that isn't dead. We can't move it.
913        return false;
914      } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
915        // If the reg is live into the loop, we can't hoist an instruction
916        // which would clobber it.
917        return false;
918      }
919    }
920
921    if (!MO.isUse())
922      continue;
923
924    assert(MRI->getVRegDef(Reg) &&
925           "Machine instr not mapped for this vreg?!");
926
927    // If the loop contains the definition of an operand, then the instruction
928    // isn't loop invariant.
929    if (CurLoop->contains(MRI->getVRegDef(Reg)))
930      return false;
931  }
932
933  // If we got this far, the instruction is loop invariant!
934  return true;
935}
936
937
938/// HasAnyPHIUse - Return true if the specified register is used by any
939/// phi node.
940bool MachineLICM::HasAnyPHIUse(unsigned Reg) const {
941  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
942         UE = MRI->use_end(); UI != UE; ++UI) {
943    MachineInstr *UseMI = &*UI;
944    if (UseMI->isPHI())
945      return true;
    // Look past copies as well.
947    if (UseMI->isCopy()) {
948      unsigned Def = UseMI->getOperand(0).getReg();
949      if (TargetRegisterInfo::isVirtualRegister(Def) &&
950          HasAnyPHIUse(Def))
951        return true;
952    }
953  }
954  return false;
955}
956
/// HasHighOperandLatency - Compute the operand latency between a def of 'Reg'
/// and a use in the current loop; return true if the target considers
/// it 'high'.
960bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
961                                        unsigned DefIdx, unsigned Reg) const {
962  if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
963    return false;
964
965  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
966         E = MRI->use_nodbg_end(); I != E; ++I) {
967    MachineInstr *UseMI = &*I;
968    if (UseMI->isCopyLike())
969      continue;
970    if (!CurLoop->contains(UseMI->getParent()))
971      continue;
972    for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
973      const MachineOperand &MO = UseMI->getOperand(i);
974      if (!MO.isReg() || !MO.isUse())
975        continue;
976      unsigned MOReg = MO.getReg();
977      if (MOReg != Reg)
978        continue;
979
980      if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
981        return true;
982    }
983
    // Only look at the first in-loop use.
985    break;
986  }
987
988  return false;
989}
990
991/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
992/// the operand latency between its def and a use is one or less.
993bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
994  if (MI.isAsCheapAsAMove() || MI.isCopyLike())
995    return true;
996  if (!InstrItins || InstrItins->isEmpty())
997    return false;
998
999  bool isCheap = false;
1000  unsigned NumDefs = MI.getDesc().getNumDefs();
1001  for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
1002    MachineOperand &DefMO = MI.getOperand(i);
1003    if (!DefMO.isReg() || !DefMO.isDef())
1004      continue;
1005    --NumDefs;
1006    unsigned Reg = DefMO.getReg();
1007    if (TargetRegisterInfo::isPhysicalRegister(Reg))
1008      continue;
1009
1010    if (!TII->hasLowDefLatency(InstrItins, &MI, i))
1011      return false;
1012    isCheap = true;
1013  }
1014
1015  return isCheap;
1016}
1017
/// CanCauseHighRegPressure - Visit BBs from header to current BB and check
/// if hoisting an instruction with the given register-class cost map can
/// cause high register pressure.
1021bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
1022  for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1023       CI != CE; ++CI) {
1024    if (CI->second <= 0)
1025      continue;
1026
1027    unsigned RCId = CI->first;
1028    unsigned Limit = RegLimit[RCId];
1029    int Cost = CI->second;
1030    for (unsigned i = BackTrace.size(); i != 0; --i) {
1031      SmallVector<unsigned, 8> &RP = BackTrace[i-1];
1032      if (RP[RCId] + Cost >= Limit)
1033        return true;
1034    }
1035  }
1036
1037  return false;
1038}
1039
1040/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
1041/// current block and update their register pressures to reflect the effect
1042/// of hoisting MI from the current block to the preheader.
1043void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
1044  if (MI->isImplicitDef())
1045    return;
1046
1047  // First compute the 'cost' of the instruction, i.e. its contribution
1048  // to register pressure.
1049  DenseMap<unsigned, int> Cost;
1050  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
1051    const MachineOperand &MO = MI->getOperand(i);
1052    if (!MO.isReg() || MO.isImplicit())
1053      continue;
1054    unsigned Reg = MO.getReg();
1055    if (!TargetRegisterInfo::isVirtualRegister(Reg))
1056      continue;
1057
1058    unsigned RCId, RCCost;
1059    getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
1060    if (MO.isDef()) {
1061      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1062      if (CI != Cost.end())
1063        CI->second += RCCost;
1064      else
1065        Cost.insert(std::make_pair(RCId, RCCost));
1066    } else if (isOperandKill(MO, MRI)) {
1067      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1068      if (CI != Cost.end())
1069        CI->second -= RCCost;
1070      else
1071        Cost.insert(std::make_pair(RCId, -RCCost));
1072    }
1073  }
1074
1075  // Update register pressure of blocks from loop header to current block.
1076  for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
1077    SmallVector<unsigned, 8> &RP = BackTrace[i];
1078    for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1079         CI != CE; ++CI) {
1080      unsigned RCId = CI->first;
1081      RP[RCId] += CI->second;
1082    }
1083  }
1084}
1085
1086/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
1087/// the given loop invariant.
1088bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
1089  if (MI.isImplicitDef())
1090    return true;
1091
  // If the instruction is cheap, only hoist it if it is re-materializable.
  // LICM will increase register pressure, which is probably not worth it if
  // the instruction is cheap.
  // Also hoist loads from constant memory, e.g. loads from stubs or the GOT.
  // Hoisting these tends to help performance in low register pressure
  // situations. The trade-off is that it may cause spills in high pressure
  // situations: it ends up adding a store in the loop preheader, but the
  // reload is no more expensive. A side benefit is that these loads are
  // frequently CSE'ed.
1100  if (IsCheapInstruction(MI)) {
1101    if (!TII->isTriviallyReMaterializable(&MI, AA))
1102      return false;
1103  } else {
    // Estimate register pressure to determine whether to LICM the instruction.
    // In a low register pressure situation, we can be more aggressive about
    // hoisting. Also, favor hoisting long-latency instructions even in a
    // moderately high pressure situation.
    // FIXME: If there are long latency loop-invariant instructions inside the
    // loop at this point, why didn't the optimizer's LICM hoist them?
1110    DenseMap<unsigned, int> Cost;
1111    for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
1112      const MachineOperand &MO = MI.getOperand(i);
1113      if (!MO.isReg() || MO.isImplicit())
1114        continue;
1115      unsigned Reg = MO.getReg();
1116      if (!TargetRegisterInfo::isVirtualRegister(Reg))
1117        continue;
1118
1119      unsigned RCId, RCCost;
1120      getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
1121      if (MO.isDef()) {
1122        if (HasHighOperandLatency(MI, i, Reg)) {
1123          ++NumHighLatency;
1124          return true;
1125        }
1126
1127        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1128        if (CI != Cost.end())
1129          CI->second += RCCost;
1130        else
1131          Cost.insert(std::make_pair(RCId, RCCost));
1132      } else if (isOperandKill(MO, MRI)) {
        // If a virtual register use is a kill, hoisting it out of the loop
        // may actually reduce register pressure or be register pressure
        // neutral.
1136        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1137        if (CI != Cost.end())
1138          CI->second -= RCCost;
1139        else
1140          Cost.insert(std::make_pair(RCId, -RCCost));
1141      }
1142    }
1143
1144    // Visit BBs from header to current BB, if hoisting this doesn't cause
1145    // high register pressure, then it's safe to proceed.
1146    if (!CanCauseHighRegPressure(Cost)) {
1147      ++NumLowRP;
1148      return true;
1149    }
1150
    // Do not "speculate" in a high register pressure situation. If an
    // instruction is not guaranteed to be executed in the loop, it's best to
    // be conservative.
1154    if (AvoidSpeculation &&
1155        (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
1156      return false;
1157
    // In a high register pressure situation, only hoist if the instruction
    // is going to be remat'ed.
1160    if (!TII->isTriviallyReMaterializable(&MI, AA) &&
1161        !MI.isInvariantLoad(AA))
1162      return false;
1163  }
1164
  // If the result(s) of this instruction are used by PHIs outside of the
  // loop, then don't hoist it, because doing so would introduce an extra
  // copy.
1167  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1168    const MachineOperand &MO = MI.getOperand(i);
1169    if (!MO.isReg() || !MO.isDef())
1170      continue;
1171    if (HasAnyPHIUse(MO.getReg()))
1172      return false;
1173  }
1174
1175  return true;
1176}
1177
1178MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
1179  // Don't unfold simple loads.
1180  if (MI->canFoldAsLoad())
1181    return 0;
1182
1183  // If not, we may be able to unfold a load and hoist that.
1184  // First test whether the instruction is loading from an amenable
1185  // memory location.
1186  if (!MI->isInvariantLoad(AA))
1187    return 0;
1188
1189  // Next determine the register class for a temporary register.
1190  unsigned LoadRegIndex;
1191  unsigned NewOpc =
1192    TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
1193                                    /*UnfoldLoad=*/true,
1194                                    /*UnfoldStore=*/false,
1195                                    &LoadRegIndex);
1196  if (NewOpc == 0) return 0;
1197  const MCInstrDesc &MID = TII->get(NewOpc);
1198  if (MID.getNumDefs() != 1) return 0;
1199  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
1200  // Ok, we're unfolding. Create a temporary register and do the unfold.
1201  unsigned Reg = MRI->createVirtualRegister(RC);
1202
1203  MachineFunction &MF = *MI->getParent()->getParent();
1204  SmallVector<MachineInstr *, 2> NewMIs;
1205  bool Success =
1206    TII->unfoldMemoryOperand(MF, MI, Reg,
1207                             /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
1208                             NewMIs);
1209  (void)Success;
1210  assert(Success &&
1211         "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
1212         "succeeded!");
1213  assert(NewMIs.size() == 2 &&
1214         "Unfolded a load into multiple instructions!");
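  // The code below relies on the unfolded load being NewMIs[0] and the
  // remaining (load-stripped) instruction being NewMIs[1].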
1215  MachineBasicBlock *MBB = MI->getParent();
1216  MachineBasicBlock::iterator Pos = MI;
1217  MBB->insert(Pos, NewMIs[0]);
1218  MBB->insert(Pos, NewMIs[1]);
1219  // If unfolding produced a load that wasn't loop-invariant or profitable to
1220  // hoist, discard the new instructions and bail.
1221  if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
1222    NewMIs[0]->eraseFromParent();
1223    NewMIs[1]->eraseFromParent();
1224    return 0;
1225  }
1226
1227  // Update register pressure for the unfolded instruction.
1228  UpdateRegPressure(NewMIs[1]);
1229
1230  // Otherwise we successfully unfolded a load that we can hoist.
1231  MI->eraseFromParent();
1232  return NewMIs[0];
1233}
1234
1235void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
1236  for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
1237    const MachineInstr *MI = &*I;
1238    unsigned Opcode = MI->getOpcode();
1239    DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1240      CI = CSEMap.find(Opcode);
1241    if (CI != CSEMap.end())
1242      CI->second.push_back(MI);
1243    else {
1244      std::vector<const MachineInstr*> CSEMIs;
1245      CSEMIs.push_back(MI);
1246      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1247    }
1248  }
1249}
1250
1251const MachineInstr*
1252MachineLICM::LookForDuplicate(const MachineInstr *MI,
1253                              std::vector<const MachineInstr*> &PrevMIs) {
1254  for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
1255    const MachineInstr *PrevMI = PrevMIs[i];
1256    if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
1257      return PrevMI;
1258  }
1259  return 0;
1260}
1261
1262bool MachineLICM::EliminateCSE(MachineInstr *MI,
1263          DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
1264  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1265  // the undef property onto uses.
1266  if (CI == CSEMap.end() || MI->isImplicitDef())
1267    return false;
1268
1269  if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
1270    DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
1271
1272    // Replace virtual registers defined by MI by their counterparts defined
1273    // by Dup.
1274    SmallVector<unsigned, 2> Defs;
1275    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1276      const MachineOperand &MO = MI->getOperand(i);
1277
1278      // Physical registers may not differ here.
1279      assert((!MO.isReg() || MO.getReg() == 0 ||
1280              !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
1281              MO.getReg() == Dup->getOperand(i).getReg()) &&
1282             "Instructions with different phys regs are not identical!");
1283
1284      if (MO.isReg() && MO.isDef() &&
1285          !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
1286        Defs.push_back(i);
1287    }
1288
1289    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
1290    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1291      unsigned Idx = Defs[i];
1292      unsigned Reg = MI->getOperand(Idx).getReg();
1293      unsigned DupReg = Dup->getOperand(Idx).getReg();
1294      OrigRCs.push_back(MRI->getRegClass(DupReg));
1295
1296      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
        // Restore the old register classes if there is more than one def.
1298        for (unsigned j = 0; j != i; ++j)
1299          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
1300        return false;
1301      }
1302    }
1303
1304    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1305      unsigned Idx = Defs[i];
1306      unsigned Reg = MI->getOperand(Idx).getReg();
1307      unsigned DupReg = Dup->getOperand(Idx).getReg();
1308      MRI->replaceRegWith(Reg, DupReg);
1309      MRI->clearKillFlags(DupReg);
1310    }
1311
1312    MI->eraseFromParent();
1313    ++NumCSEed;
1314    return true;
1315  }
1316  return false;
1317}
1318
1319/// MayCSE - Return true if the given instruction will be CSE'd if it's
1320/// hoisted out of the loop.
1321bool MachineLICM::MayCSE(MachineInstr *MI) {
1322  unsigned Opcode = MI->getOpcode();
1323  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1324    CI = CSEMap.find(Opcode);
1325  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1326  // the undef property onto uses.
1327  if (CI == CSEMap.end() || MI->isImplicitDef())
1328    return false;
1329
1330  return LookForDuplicate(MI, CI->second) != 0;
1331}
1332
/// Hoist - When an instruction is found to use only loop invariant operands
/// that are safe to hoist, this function is called to do the dirty work.
///
1336bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
1337  // First check whether we should hoist this instruction.
1338  if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
1339    // If not, try unfolding a hoistable load.
1340    MI = ExtractHoistableLoad(MI);
1341    if (!MI) return false;
1342  }
1343
  // Now move the instruction to the preheader, inserting it before any
  // terminator instructions.
1346  DEBUG({
1347      dbgs() << "Hoisting " << *MI;
1348      if (Preheader->getBasicBlock())
1349        dbgs() << " to MachineBasicBlock "
1350               << Preheader->getName();
1351      if (MI->getParent()->getBasicBlock())
1352        dbgs() << " from MachineBasicBlock "
1353               << MI->getParent()->getName();
1354      dbgs() << "\n";
1355    });
1356
1357  // If this is the first instruction being hoisted to the preheader,
1358  // initialize the CSE map with potential common expressions.
1359  if (FirstInLoop) {
1360    InitCSEMap(Preheader);
1361    FirstInLoop = false;
1362  }
1363
1364  // Look for opportunity to CSE the hoisted instruction.
1365  unsigned Opcode = MI->getOpcode();
1366  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1367    CI = CSEMap.find(Opcode);
1368  if (!EliminateCSE(MI, CI)) {
1369    // Otherwise, splice the instruction to the preheader.
1370    Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
1371
1372    // Update register pressure for BBs from header to this block.
1373    UpdateBackTraceRegPressure(MI);
1374
1375    // Clear the kill flags of any register this instruction defines,
1376    // since they may need to be live throughout the entire loop
1377    // rather than just live for part of it.
1378    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1379      MachineOperand &MO = MI->getOperand(i);
1380      if (MO.isReg() && MO.isDef() && !MO.isDead())
1381        MRI->clearKillFlags(MO.getReg());
1382    }
1383
1384    // Add to the CSE map.
1385    if (CI != CSEMap.end())
1386      CI->second.push_back(MI);
1387    else {
1388      std::vector<const MachineInstr*> CSEMIs;
1389      CSEMIs.push_back(MI);
1390      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1391    }
1392  }
1393
1394  ++NumHoisted;
1395  Changed = true;
1396
1397  return true;
1398}
1399
1400MachineBasicBlock *MachineLICM::getCurPreheader() {
1401  // Determine the block to which to hoist instructions. If we can't find a
1402  // suitable loop predecessor, we can't do any hoisting.
1403
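  // CurPreheader doubles as a tri-state: null means "not computed yet", a
  // sentinel value of -1 means "tried and failed", and any other value is
  // the preheader to use.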
1404  // If we've tried to get a preheader and failed, don't try again.
1405  if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
1406    return 0;
1407
1408  if (!CurPreheader) {
1409    CurPreheader = CurLoop->getLoopPreheader();
1410    if (!CurPreheader) {
1411      MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
1412      if (!Pred) {
1413        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1414        return 0;
1415      }
1416
1417      CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
1418      if (!CurPreheader) {
1419        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1420        return 0;
1421      }
1422    }
1423  }
1424  return CurPreheader;
1425}
1426