MachineSink.cpp revision f99efdf3290b438a2cd74304e4299d50bce2b397
//===-- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "machine-sink"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

STATISTIC(NumSunk,      "Number of machine instructions sunk");
STATISTIC(NumSplit,     "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");

namespace {
  class MachineSinking : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    MachineRegisterInfo  *MRI;  // Machine register information
    MachineDominatorTree *DT;   // Machine dominator tree
    MachineLoopInfo *LI;
    AliasAnalysis *AA;
    BitVector AllocatableSet;   // Which physregs are allocatable?

    // Remember which edges have been considered for breaking.
    SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
    CEBCandidates;

  public:
    static char ID; // Pass identification
    MachineSinking() : MachineFunctionPass(ID) {
      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
    }

    virtual void releaseMemory() {
      CEBCandidates.clear();
    }

  private:
    bool ProcessBlock(MachineBasicBlock &MBB);
    bool isWorthBreakingCriticalEdge(MachineInstr *MI,
                                     MachineBasicBlock *From,
                                     MachineBasicBlock *To);
    MachineBasicBlock *SplitCriticalEdge(MachineInstr *MI,
                                         MachineBasicBlock *From,
                                         MachineBasicBlock *To,
                                         bool BreakPHIEdge);
    bool SinkInstruction(MachineInstr *MI, bool &SawStore);
    bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
                                 MachineBasicBlock *DefMBB,
                                 bool &BreakPHIEdge, bool &LocalUse) const;
    MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI,
                                        MachineBasicBlock *MBB,
                                        bool &BreakPHIEdge);
    bool isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock *SuccToSinkTo);

    bool PerformTrivialForwardCoalescing(MachineInstr *MI,
                                         MachineBasicBlock *MBB);
  };

  // SuccessorSorter - Sort successors according to their loop depth.
  struct SuccessorSorter {
    SuccessorSorter(MachineLoopInfo *LoopInfo) : LI(LoopInfo) {}
    bool operator()(const MachineBasicBlock *LHS,
                    const MachineBasicBlock *RHS) const {
      return LI->getLoopDepth(LHS) < LI->getLoopDepth(RHS);
    }
    MachineLoopInfo *LI;
  };
} // end anonymous namespace

char MachineSinking::ID = 0;
char &llvm::MachineSinkingID = MachineSinking::ID;
INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
                "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineSinking, "machine-sink",
                "Machine code sinking", false, false)

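/// PerformTrivialForwardCoalescing - If MI is a copy between virtual registers
/// of the same register class and the source has a single non-debug use,
/// replace the destination with the source everywhere and erase the copy.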
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI->isCopy())
    return false;

  unsigned SrcReg = MI->getOperand(1).getReg();
  unsigned DstReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg) ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  DEBUG(dbgs() << "Coalescing: " << *DefMI);
  DEBUG(dbgs() << "*** to: " << *MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI->eraseFromParent();
  ++NumCoalesces;
  return true;
}

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first, e.g.:
  //
  // BB#1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: BB#0
  //     ...
  //     %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
  //     ...
  //     JE_4 <BB#37>, %EFLAGS<imp-use>
  //   Successors according to CFG: BB#37 BB#2
  //
  // BB#2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: BB#0 BB#1
  //     %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
  BreakPHIEdge = true;
  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    // Determine the block of the use.
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}

bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "******** Machine Sinking ********\n");

  const TargetMachine &TM = MF.getTarget();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  LI = &getAnalysis<MachineLoopInfo>();
  AA = &getAnalysis<AliasAnalysis>();
  AllocatableSet = TRI->getAllocatableSet(MF);

  bool EverMadeChange = false;

  while (1) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    for (MachineFunction::iterator I = MF.begin(), E = MF.end();
         I != E; ++I)
      MadeChange |= ProcessBlock(*I);

    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }
  return EverMadeChange;
}

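/// ProcessBlock - Walk MBB bottom-up, coalescing trivial copies and sinking
/// instructions into successor blocks when possible. Returns true if the
/// block was changed.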
bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  // Can't sink anything out of a block that has less than two successors.
  if (MBB.succ_size() <= 1 || MBB.empty()) return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable loop there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Walk the basic block bottom-up.  Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr *MI = I;  // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI->isDebugValue())
      continue;

    bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
    if (Joined) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore))
      ++NumSunk, MadeChange = true;

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  return MadeChange;
}

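/// isWorthBreakingCriticalEdge - Decide whether it is likely to be worthwhile
/// to split the critical edge From->To in order to sink MI. This is a
/// heuristic only; legality is checked separately in SplitCriticalEdge.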
bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)))
    return true;

  if (!MI->isCopy() && !MI->isAsCheapAsAMove())
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    // Only defs of virtual registers are ever sunk by this pass, so physregs
    // are of no interest here.
    if (Reg == 0 || !TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (MRI->hasOneNonDBGUse(Reg))
      return true;
  }

  return false;
}

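/// SplitCriticalEdge - Split the critical edge FromBB->ToBB if doing so is
/// both legal and worth it, and return the newly created block; otherwise
/// return null and leave the edge alone.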
MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
                                                     MachineBasicBlock *FromBB,
                                                     MachineBasicBlock *ToBB,
                                                     bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return 0;

  // Avoid breaking back edge. From == To means backedge for single BB loop.
  if (!SplitEdges || FromBB == ToBB)
    return 0;

  // Check for backedges of more "complex" loops.
  if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
      LI->isLoopHeader(ToBB))
    return 0;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // BB#1:
  // v1024
  // Beq BB#3
  // <fallthrough>
  // BB#2:
  // ... no uses of v1024
  // <fallthrough>
  // BB#3:
  // ...
  //       = v1024
  //
  // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
  //
  // BB#1:
  // ...
  // Bne BB#2
  // BB#4:
  // v1024 =
  // B BB#3
  // BB#2:
  // ... no uses of v1024
  // <fallthrough>
  // BB#3:
  // ...
  //       = v1024
  //
  // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
  // flow. We need to ensure the new basic block where the computation is
  // sunk to dominates all the uses.
  // It's only legal to break critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
           E = ToBB->pred_end(); PI != E; ++PI) {
      if (*PI == FromBB)
        continue;
      if (!DT->dominates(ToBB, *PI))
        return 0;
    }
  }

  return FromBB->SplitCriticalEdge(ToBB, this);
}

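/// AvoidsSinking - Return true if MI is an instruction this pass refuses to
/// sink: insert_subreg, subreg_to_reg and reg_sequence are kept close to
/// their source values to make them easier to coalesce.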
static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
  return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}

/// collectDebugValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
static void collectDebugValues(MachineInstr *MI,
                               SmallVector<MachineInstr *, 2> &DbgValues) {
  DbgValues.clear();
  if (!MI->getOperand(0).isReg())
    return;

  MachineBasicBlock::iterator DI = MI; ++DI;
  for (MachineBasicBlock::iterator DE = MI->getParent()->end();
       DI != DE; ++DI) {
    if (!DI->isDebugValue())
      return;
    if (DI->getOperand(0).isReg() &&
        DI->getOperand(0).getReg() == MI->getOperand(0).getReg())
      DbgValues.push_back(DI);
  }
}

/// isPostDominatedBy - Return true if A is post dominated by B.
static bool isPostDominatedBy(MachineBasicBlock *A, MachineBasicBlock *B) {
  // FIXME - Use real post dominator.
  if (A->succ_size() != 2)
    return false;
  MachineBasicBlock::succ_iterator I = A->succ_begin();
  if (B == *I)
    ++I;
  MachineBasicBlock *OtherSuccBlock = *I;
  if (OtherSuccBlock->succ_size() != 1 ||
      *(OtherSuccBlock->succ_begin()) != B)
    return false;

  return true;
}

/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo) {
  assert(MI && "Invalid MachineInstr!");
  assert(SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post dominate the current block.
  if (!isPostDominatedBy(MBB, SuccToSinkTo))
    return true;

  // Check whether the only uses in the post-dominated block are PHI
  // instructions.
  bool NonPHIUse = false;
  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseBlock == SuccToSinkTo && !UseInst->isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // Even if SuccToSinkTo post dominates the current block, sinking may still
  // be profitable if MI can be profitably sunk further in the next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding the successor is expensive at compile time, cache the
  // results.
  if (MachineBasicBlock *MBB2 =
        FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2);

  // If SuccToSinkTo is the final destination and it post dominates the current
  // block, then it is not profitable to sink MI into SuccToSinkTo.
  return false;
}

/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
                                                    MachineBasicBlock *MBB,
                                                    bool &BreakPHIEdge) {
  assert(MI && "Invalid MachineInstr!");
  assert(MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction.  If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
          return NULL;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return NULL;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return NULL;

      // FIXME: This picks a successor to sink into based on having one
      // successor that dominates all the uses.  However, there are cases where
      // sinking can happen but where the sink point isn't a successor.  For
      // example:
      //
      //   x = computation
      //   if () {} else {}
      //   use x
      //
      // the instruction could be sunk over the whole diamond for the
      // if/then/else (or loop, etc), allowing it to be sunk into other blocks
      // after that.

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return NULL;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to.
      // We give successors with smaller loop depth higher priority.
      SmallVector<MachineBasicBlock*, 4> Succs(MBB->succ_begin(),
                                               MBB->succ_end());
      std::stable_sort(Succs.begin(), Succs.end(), SuccessorSorter(LI));
      for (SmallVector<MachineBasicBlock*, 4>::iterator SI = Succs.begin(),
           E = Succs.end(); SI != E; ++SI) {
        MachineBasicBlock *SuccBlock = *SI;
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return NULL;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (SuccToSinkTo == 0)
        return NULL;
      else if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo))
        return NULL;
    }
  }

  // It is not possible to sink an instruction into its own block.  This can
  // happen with loops.
  if (MBB == SuccToSinkTo)
    return NULL;

  // It's not safe to sink instructions to EH landing pad. Control flow into
  // landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isLandingPad())
    return NULL;

  return SuccToSinkTo;
}

/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
  // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
  // be close to the source to make it easier to coalesce.
  if (AvoidsSinking(MI, MRI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI->isSafeToMove(TII, AA, SawStore))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges.  We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block.  This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  bool BreakPHIEdge = false;
  MachineBasicBlock *ParentBlock = MI->getParent();
  MachineBasicBlock *SuccToSinkTo =
    FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge);

  // If there are no outputs, it must have side-effects.
  if (SuccToSinkTo == 0)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);

  // If the block has multiple predecessors, sinking would introduce the
  // computation on paths where it doesn't already exist. Decide whether it is
  // safe to sink along the critical edge as-is, or whether the edge must be
  // split first; if neither works, punt.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool store = true;
    if (!MI->isSafeToMove(TII, AA, store)) {
      DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a loop.
    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Loop header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      MachineBasicBlock *NewSucc =
        SplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!NewSucc) {
        DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                        "break critical edge\n");
        return false;
      } else {
        DEBUG(dbgs() << " *** Splitting critical edge:"
              " BB#" << ParentBlock->getNumber()
              << " -- BB#" << NewSucc->getNumber()
              << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
        SuccToSinkTo = NewSucc;
        ++NumSplit;
        BreakPHIEdge = false;
      }
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    MachineBasicBlock *NewSucc = SplitCriticalEdge(MI, ParentBlock,
                                                   SuccToSinkTo, BreakPHIEdge);
    if (!NewSucc) {
      DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
            "break critical edge\n");
      return false;
    }

    DEBUG(dbgs() << " *** Splitting critical edge:"
          " BB#" << ParentBlock->getNumber()
          << " -- BB#" << NewSucc->getNumber()
          << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
    SuccToSinkTo = NewSucc;
    ++NumSplit;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
    ++InsertPos;

  // Collect matching debug values.
  SmallVector<MachineInstr *, 2> DbgValuesToSink;
  collectDebugValues(MI, DbgValuesToSink);

  // Move the instruction.
  SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
                       ++MachineBasicBlock::iterator(MI));

  // Move debug values.
  for (SmallVector<MachineInstr *, 2>::iterator DBI = DbgValuesToSink.begin(),
         DBE = DbgValuesToSink.end(); DBI != DBE; ++DBI) {
    MachineInstr *DbgMI = *DBI;
    SuccToSinkTo->splice(InsertPos, ParentBlock, DbgMI,
                         ++MachineBasicBlock::iterator(DbgMI));
  }

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MI->clearKillInfo();

  return true;
}