LiveIntervalAnalysis.cpp revision 8a61da8a689ee95874c833af4c7aa965fab5c0a9
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/Passes.h"
31#include "llvm/CodeGen/ProcessImplicitDefs.h"
32#include "llvm/Target/TargetRegisterInfo.h"
33#include "llvm/Target/TargetInstrInfo.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include "llvm/ADT/DepthFirstIterator.h"
41#include "llvm/ADT/SmallSet.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/ADT/STLExtras.h"
44#include <algorithm>
45#include <limits>
46#include <cmath>
47using namespace llvm;
48
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Register the pass and declare its analysis dependencies with the pass
// manager so "liveintervals" can be requested by name.
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
69
/// getAnalysisUsage - Declare the analyses this pass requires and
/// preserves.  LiveIntervals only annotates the machine code; it never
/// changes the CFG.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  // When strong PHI elimination is enabled, the standard PHIElimination
  // pass is not requested here (PHIs are handled by the strong-PHI path).
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  // SlotIndexes is required transitively: users of LiveIntervals also need
  // the slot index numbering to remain valid.
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
92
93void LiveIntervals::releaseMemory() {
94  // Free the live intervals themselves.
95  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
96       E = r2iMap_.end(); I != E; ++I)
97    delete I->second;
98
99  r2iMap_.clear();
100
101  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
102  VNInfoAllocator.Reset();
103  while (!CloneMIs.empty()) {
104    MachineInstr *MI = CloneMIs.back();
105    CloneMIs.pop_back();
106    mf_->DeleteMachineInstr(MI);
107  }
108}
109
/// runOnMachineFunction - Compute live intervals for every virtual and
/// allocatable physical register in the function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, target hooks, and sibling analyses that the rest
  // of the pass uses.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Build all the intervals.
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  // Intervals were attached to the function, so report it as modified.
  return true;
}
130
131/// print - Implement the dump method.
132void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
133  OS << "********** INTERVALS **********\n";
134  for (const_iterator I = begin(), E = end(); I != E; ++I) {
135    I->second->print(OS, tri_);
136    OS << "\n";
137  }
138
139  printInstrs(OS);
140}
141
/// printInstrs - Print the function's machine code to OS, annotated with
/// slot indexes.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}
146
/// dumpInstrs - Like printInstrs, but write to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
150
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// to li's register would conflict with a use or def of 'reg' (or an
/// overlapping register) inside li's live range.  Conservatively returns
/// true when the range is too complex to analyze.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  // Every instruction in the range was deleted: nothing can conflict.
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Walk every instruction in [firstMI, lastMI] and inspect its operands.
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      // Map virtual register operands through the current assignment in
      // vrm; vregs without a physreg assignment cannot conflict yet.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
216
/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges (other than the copies in JoinedCopies, which are being
/// coalesced away) has an operand referencing a physical register that
/// overlaps Reg.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Visit each base index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        // Only other physical registers can overlap Reg.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
247
248static
249bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
250  unsigned Reg = MI.getOperand(MOIdx).getReg();
251  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
252    const MachineOperand &MO = MI.getOperand(i);
253    if (!MO.isReg())
254      continue;
255    if (MO.getReg() == Reg && MO.isDef()) {
256      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
257             MI.getOperand(MOIdx).getSubReg() &&
258             (MO.getSubReg() || MO.isImplicit()));
259      return true;
260    }
261  }
262  return false;
263}
264
265/// isPartialRedef - Return true if the specified def at the specific index is
266/// partially re-defining the specified live interval. A common case of this is
267/// a definition of the sub-register.
268bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
269                                   LiveInterval &interval) {
270  if (!MO.getSubReg() || MO.isEarlyClobber())
271    return false;
272
273  SlotIndex RedefIndex = MIIdx.getDefIndex();
274  const LiveRange *OldLR =
275    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
276  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
277  if (DefMI != 0) {
278    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
279  }
280  return false;
281}
282
/// handleVirtualRegisterDef - Update 'interval' for the def of a virtual
/// register by operand MOIdx of 'mi' at slot MIIdx.  The first def seen
/// for a vreg builds its value number and live ranges from LiveVariables'
/// kill information; later defs of the same vreg (phi elimination,
/// two-address rewriting, or partial sub-register redefs) split the
/// existing value instead.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    // Remember copy-like defining instructions for the coalescer.
    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // The defining instruction is itself the kill: the value is dead
        // right at its own def slot.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
480
/// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
/// register.  The resulting range ends at the killing use, at the next
/// redef, or (if the def is dead) at the def slot itself; it never leaves
/// the defining block.  CopyMI, if non-null, is recorded as the defining
/// copy of a newly created value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    // Keep baseIndex in lock-step with mi, skipping the slots of deleted
    // instructions.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
565
/// handleRegisterDef - Dispatch a register-def operand to the right
/// handler: virtual registers go to handleVirtualRegisterDef, allocatable
/// physical registers (and their sub-registers) go to
/// handlePhysicalRegisterDef.  Non-allocatable physregs are ignored.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Remember copy-like defs so the value number records its copy.
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
589
/// handleLiveInRegister - Build the live range for a register that is
/// live-in to MBB.  The incoming value has no defining instruction, so it
/// is modeled as a PHI-def at the block start.  With isAlias set,
/// 'interval' belongs to a sub-register of the actual live-in and is
/// treated as dead on entry if the block never uses or redefines it.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  // Scan forward for the first kill or redefinition of the register,
  // advancing baseIndex in lock-step with mi.
  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // An unused alias register is considered dead on entry.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      // The register is live through the entire block.
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // Represent the incoming value as a PHI def at the block start; there is
  // no real instruction at that index.
  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
662
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers that only appear in <undef> uses; they get empty intervals
  // at the end.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs occupy no slot; don't advance MIIndex for them.
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
731
732LiveInterval* LiveIntervals::createInterval(unsigned reg) {
733  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
734  return new LiveInterval(reg, Weight);
735}
736
737/// dupInterval - Duplicate a live interval. The caller is responsible for
738/// managing the allocated memory.
739LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
740  LiveInterval *NewLI = createInterval(li->reg);
741  NewLI->Copy(*li, mri_, getVNInfoAllocator());
742  return NewLI;
743}
744
745/// shrinkToUses - After removing some uses of a register, shrink its live
746/// range to just the remaining uses. This method does not compute reaching
747/// defs for new uses, and it doesn't remove dead defs.
748void LiveIntervals::shrinkToUses(LiveInterval *li) {
749  DEBUG(dbgs() << "Shrink: " << *li << '\n');
750  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
751         && "Can't only shrink physical registers");
752  // Find all the values used, including PHI kills.
753  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
754
755  // Visit all instructions reading li->reg.
756  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
757       MachineInstr *UseMI = I.skipInstruction();) {
758    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
759      continue;
760    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
761    VNInfo *VNI = li->getVNInfoAt(Idx);
762    assert(VNI && "Live interval not live into reading instruction");
763    if (VNI->def == Idx) {
764      // Special case: An early-clobber tied operand reads and writes the
765      // register one slot early.
766      Idx = Idx.getPrevSlot();
767      VNI = li->getVNInfoAt(Idx);
768      assert(VNI && "Early-clobber tied value not available");
769    }
770    WorkList.push_back(std::make_pair(Idx, VNI));
771  }
772
773  // Create a new live interval with only minimal live segments per def.
774  LiveInterval NewLI(li->reg, 0);
775  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
776       I != E; ++I) {
777    VNInfo *VNI = *I;
778    if (VNI->isUnused())
779      continue;
780    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
781  }
782
783  // Extend intervals to reach all uses in WorkList.
784  while (!WorkList.empty()) {
785    SlotIndex Idx = WorkList.back().first;
786    VNInfo *VNI = WorkList.back().second;
787    WorkList.pop_back();
788
789    // Extend the live range for VNI to be live at Idx.
790    LiveInterval::iterator I = NewLI.find(Idx);
791
792    // Already got it?
793    if (I != NewLI.end() && I->start <= Idx) {
794      assert(I->valno == VNI && "Unexpected existing value number");
795      continue;
796    }
797
798    // Is there already a live range in the block containing Idx?
799    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
800    SlotIndex BlockStart = getMBBStartIdx(MBB);
801    DEBUG(dbgs() << "Shrink: Use val#" << VNI->id << " at " << Idx
802                 << " in BB#" << MBB->getNumber() << '@' << BlockStart);
803    if (I != NewLI.begin() && (--I)->end > BlockStart) {
804      assert(I->valno == VNI && "Wrong reaching def");
805      DEBUG(dbgs() << " extend [" << I->start << ';' << I->end << ")\n");
806      // Is this the first use of a PHIDef in its defining block?
807      if (VNI->isPHIDef() && I->end == VNI->def.getNextSlot()) {
808        // The PHI is live, make sure the predecessors are live-out.
809        for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
810             PE = MBB->pred_end(); PI != PE; ++PI) {
811          SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
812          VNInfo *PVNI = li->getVNInfoAt(Stop);
813          // A predecessor is not required to have a live-out value for a PHI.
814          if (PVNI) {
815            assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
816            WorkList.push_back(std::make_pair(Stop, PVNI));
817          }
818        }
819      }
820
821      // Extend the live range in the block to include Idx.
822      NewLI.addRange(LiveRange(I->end, Idx.getNextSlot(), VNI));
823      continue;
824    }
825
826    // VNI is live-in to MBB.
827    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
828    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
829
830    // Make sure VNI is live-out from the predecessors.
831    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
832         PE = MBB->pred_end(); PI != PE; ++PI) {
833      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
834      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
835      WorkList.push_back(std::make_pair(Stop, VNI));
836    }
837  }
838
839  // Handle dead values.
840  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
841       I != E; ++I) {
842    VNInfo *VNI = *I;
843    if (VNI->isUnused())
844      continue;
845    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
846    assert(LII != NewLI.end() && "Missing live range for PHI");
847    if (LII->end != VNI->def.getNextSlot())
848      continue;
849    if (!VNI->isPHIDef()) {
850      // This is a dead PHI. Remove it.
851      VNI->setIsUnused(true);
852      NewLI.removeRange(*LII);
853    } else {
854      // This is a dead def. Make sure the instruction knows.
855      MachineInstr *MI = getInstructionFromIndex(VNI->def);
856      assert(MI && "No instruction defining live value");
857      MI->addRegisterDead(li->reg, tri_);
858    }
859  }
860
861  // Move the trimmed ranges back.
862  li->ranges.swap(NewLI.ranges);
863  DEBUG(dbgs() << "Shrink: " << *li << '\n');
864}
865
866
867//===----------------------------------------------------------------------===//
868// Register allocator hooks.
869//
870
871MachineBasicBlock::iterator
872LiveIntervals::getLastSplitPoint(const LiveInterval &li,
873                                 MachineBasicBlock *mbb) {
874  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
875
876  // If li is not live into a landing pad, we can insert spill code before the
877  // first terminator.
878  if (!lpad || !isLiveInToMBB(li, lpad))
879    return mbb->getFirstTerminator();
880
881  // When there is a landing pad, spill code must go before the call instruction
882  // that can throw.
883  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
884  while (I != B) {
885    --I;
886    if (I->getDesc().isCall())
887      return I;
888  }
889  // The block contains no calls that can throw, so use the first terminator.
890  return mbb->getFirstTerminator();
891}
892
893void LiveIntervals::addKillFlags() {
894  for (iterator I = begin(), E = end(); I != E; ++I) {
895    unsigned Reg = I->first;
896    if (TargetRegisterInfo::isPhysicalRegister(Reg))
897      continue;
898    if (mri_->reg_nodbg_empty(Reg))
899      continue;
900    LiveInterval *LI = I->second;
901
902    // Every instruction that kills Reg corresponds to a live range end point.
903    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
904         ++RI) {
905      // A LOAD index indicates an MBB edge.
906      if (RI->end.isLoad())
907        continue;
908      MachineInstr *MI = getInstructionFromIndex(RI->end);
909      if (!MI)
910        continue;
911      MI->addRegisterKilled(Reg, NULL);
912    }
913  }
914}
915
916/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
917/// allow one) virtual register operand, then its uses are implicitly using
918/// the register. Returns the virtual register.
919unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
920                                            MachineInstr *MI) const {
921  unsigned RegOp = 0;
922  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
923    MachineOperand &MO = MI->getOperand(i);
924    if (!MO.isReg() || !MO.isUse())
925      continue;
926    unsigned Reg = MO.getReg();
927    if (Reg == 0 || Reg == li.reg)
928      continue;
929
930    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
931        !allocatableRegs_[Reg])
932      continue;
933    // FIXME: For now, only remat MI with at most one register operand.
934    assert(!RegOp &&
935           "Can't rematerialize instruction with multiple register operand!");
936    RegOp = MO.getReg();
937#ifndef NDEBUG
938    break;
939#endif
940  }
941  return RegOp;
942}
943
944/// isValNoAvailableAt - Return true if the val# of the specified interval
945/// which reaches the given instruction also reaches the specified use index.
946bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
947                                       SlotIndex UseIdx) const {
948  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
949  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
950}
951
952/// isReMaterializable - Returns true if the definition MI of the specified
953/// val# of the specified interval is re-materializable.
954bool
955LiveIntervals::isReMaterializable(const LiveInterval &li,
956                                  const VNInfo *ValNo, MachineInstr *MI,
957                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
958                                  bool &isLoad) {
959  if (DisableReMat)
960    return false;
961
962  if (!tii_->isTriviallyReMaterializable(MI, aa_))
963    return false;
964
965  // Target-specific code can mark an instruction as being rematerializable
966  // if it has one virtual reg use, though it had better be something like
967  // a PIC base register which is likely to be live everywhere.
968  unsigned ImpUse = getReMatImplicitUse(li, MI);
969  if (ImpUse) {
970    const LiveInterval &ImpLi = getInterval(ImpUse);
971    for (MachineRegisterInfo::use_nodbg_iterator
972           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
973         ri != re; ++ri) {
974      MachineInstr *UseMI = &*ri;
975      SlotIndex UseIdx = getInstructionIndex(UseMI);
976      if (li.getVNInfoAt(UseIdx) != ValNo)
977        continue;
978      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
979        return false;
980    }
981
982    // If a register operand of the re-materialized instruction is going to
983    // be spilled next, then it's not legal to re-materialize this instruction.
984    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
985      if (ImpUse == SpillIs[i]->reg)
986        return false;
987  }
988  return true;
989}
990
991/// isReMaterializable - Returns true if the definition MI of the specified
992/// val# of the specified interval is re-materializable.
993bool LiveIntervals::isReMaterializable(const LiveInterval &li,
994                                       const VNInfo *ValNo, MachineInstr *MI) {
995  SmallVector<LiveInterval*, 4> Dummy1;
996  bool Dummy2;
997  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
998}
999
1000/// isReMaterializable - Returns true if every definition of MI of every
1001/// val# of the specified interval is re-materializable.
1002bool
1003LiveIntervals::isReMaterializable(const LiveInterval &li,
1004                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
1005                                  bool &isLoad) {
1006  isLoad = false;
1007  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1008       i != e; ++i) {
1009    const VNInfo *VNI = *i;
1010    if (VNI->isUnused())
1011      continue; // Dead val#.
1012    // Is the def for the val# rematerializable?
1013    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1014    if (!ReMatDefMI)
1015      return false;
1016    bool DefIsLoad = false;
1017    if (!ReMatDefMI ||
1018        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1019      return false;
1020    isLoad |= DefIsLoad;
1021  }
1022  return true;
1023}
1024
1025/// FilterFoldedOps - Filter out two-address use operands. Return
1026/// true if it finds any issue with the operands that ought to prevent
1027/// folding.
1028static bool FilterFoldedOps(MachineInstr *MI,
1029                            SmallVector<unsigned, 2> &Ops,
1030                            unsigned &MRInfo,
1031                            SmallVector<unsigned, 2> &FoldOps) {
1032  MRInfo = 0;
1033  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1034    unsigned OpIdx = Ops[i];
1035    MachineOperand &MO = MI->getOperand(OpIdx);
1036    // FIXME: fold subreg use.
1037    if (MO.getSubReg())
1038      return true;
1039    if (MO.isDef())
1040      MRInfo |= (unsigned)VirtRegMap::isMod;
1041    else {
1042      // Filter out two-address use operand(s).
1043      if (MI->isRegTiedToDefOperand(OpIdx)) {
1044        MRInfo = VirtRegMap::isModRef;
1045        continue;
1046      }
1047      MRInfo |= (unsigned)VirtRegMap::isRef;
1048    }
1049    FoldOps.push_back(OpIdx);
1050  }
1051  return false;
1052}
1053
1054
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true. On success the caller's MI pointer is replaced with the
/// folded instruction; the original is erased. (InstrIdx is currently
/// unused by this implementation.)
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot (isSS) or the
  // rematerialized definition DefMI into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer any pending spill / restore / emergency-spill points from
    // the old instruction to the folded one, then swap it into the maps.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
1106
1107/// canFoldMemoryOperand - Returns true if the specified load / store
1108/// folding is possible.
1109bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1110                                         SmallVector<unsigned, 2> &Ops,
1111                                         bool ReMat) const {
1112  // Filter the list of operand indexes that are to be folded. Abort if
1113  // any operand will prevent folding.
1114  unsigned MRInfo = 0;
1115  SmallVector<unsigned, 2> FoldOps;
1116  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1117    return false;
1118
1119  // It's only legal to remat for a use, not a def.
1120  if (ReMat && (MRInfo & VirtRegMap::isMod))
1121    return false;
1122
1123  return tii_->canFoldMemoryOperand(MI, FoldOps);
1124}
1125
1126bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1127  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1128
1129  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
1130
1131  if (mbb == 0)
1132    return false;
1133
1134  for (++itr; itr != li.ranges.end(); ++itr) {
1135    MachineBasicBlock *mbb2 =
1136      indexes_->getMBBCoveringRange(itr->start, itr->end);
1137
1138    if (mbb2 != mbb)
1139      return false;
1140  }
1141
1142  return true;
1143}
1144
1145/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1146/// interval on to-be re-materialized operands of MI) with new register.
1147void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1148                                       MachineInstr *MI, unsigned NewVReg,
1149                                       VirtRegMap &vrm) {
1150  // There is an implicit use. That means one of the other operand is
1151  // being remat'ed and the remat'ed instruction has li.reg as an
1152  // use operand. Make sure we rewrite that as well.
1153  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1154    MachineOperand &MO = MI->getOperand(i);
1155    if (!MO.isReg())
1156      continue;
1157    unsigned Reg = MO.getReg();
1158    if (!TargetRegisterInfo::isVirtualRegister(Reg))
1159      continue;
1160    if (!vrm.isReMaterialized(Reg))
1161      continue;
1162    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1163    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1164    if (UseMO)
1165      UseMO->setReg(NewVReg);
1166  }
1167}
1168
1169/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1170/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1171bool LiveIntervals::
1172rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1173                 bool TrySplit, SlotIndex index, SlotIndex end,
1174                 MachineInstr *MI,
1175                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1176                 unsigned Slot, int LdSlot,
1177                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1178                 VirtRegMap &vrm,
1179                 const TargetRegisterClass* rc,
1180                 SmallVector<int, 4> &ReMatIds,
1181                 const MachineLoopInfo *loopInfo,
1182                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1183                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1184                 std::vector<LiveInterval*> &NewLIs) {
1185  bool CanFold = false;
1186 RestartInstruction:
1187  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1188    MachineOperand& mop = MI->getOperand(i);
1189    if (!mop.isReg())
1190      continue;
1191    unsigned Reg = mop.getReg();
1192    if (!TargetRegisterInfo::isVirtualRegister(Reg))
1193      continue;
1194    if (Reg != li.reg)
1195      continue;
1196
1197    bool TryFold = !DefIsReMat;
1198    bool FoldSS = true; // Default behavior unless it's a remat.
1199    int FoldSlot = Slot;
1200    if (DefIsReMat) {
1201      // If this is the rematerializable definition MI itself and
1202      // all of its uses are rematerialized, simply delete it.
1203      if (MI == ReMatOrigDefMI && CanDelete) {
1204        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1205                     << *MI << '\n');
1206        RemoveMachineInstrFromMaps(MI);
1207        vrm.RemoveMachineInstrFromMaps(MI);
1208        MI->eraseFromParent();
1209        break;
1210      }
1211
1212      // If def for this use can't be rematerialized, then try folding.
1213      // If def is rematerializable and it's a load, also try folding.
1214      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1215      if (isLoad) {
1216        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1217        FoldSS = isLoadSS;
1218        FoldSlot = LdSlot;
1219      }
1220    }
1221
1222    // Scan all of the operands of this instruction rewriting operands
1223    // to use NewVReg instead of li.reg as appropriate.  We do this for
1224    // two reasons:
1225    //
1226    //   1. If the instr reads the same spilled vreg multiple times, we
1227    //      want to reuse the NewVReg.
1228    //   2. If the instr is a two-addr instruction, we are required to
1229    //      keep the src/dst regs pinned.
1230    //
1231    // Keep track of whether we replace a use and/or def so that we can
1232    // create the spill interval with the appropriate range.
1233    SmallVector<unsigned, 2> Ops;
1234    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1235
1236    // Create a new virtual register for the spill interval.
1237    // Create the new register now so we can map the fold instruction
1238    // to the new register so when it is unfolded we get the correct
1239    // answer.
1240    bool CreatedNewVReg = false;
1241    if (NewVReg == 0) {
1242      NewVReg = mri_->createVirtualRegister(rc);
1243      vrm.grow();
1244      CreatedNewVReg = true;
1245
1246      // The new virtual register should get the same allocation hints as the
1247      // old one.
1248      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1249      if (Hint.first || Hint.second)
1250        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1251    }
1252
1253    if (!TryFold)
1254      CanFold = false;
1255    else {
1256      // Do not fold load / store here if we are splitting. We'll find an
1257      // optimal point to insert a load / store later.
1258      if (!TrySplit) {
1259        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1260                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1261          // Folding the load/store can completely change the instruction in
1262          // unpredictable ways, rescan it from the beginning.
1263
1264          if (FoldSS) {
1265            // We need to give the new vreg the same stack slot as the
1266            // spilled interval.
1267            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1268          }
1269
1270          HasUse = false;
1271          HasDef = false;
1272          CanFold = false;
1273          if (isNotInMIMap(MI))
1274            break;
1275          goto RestartInstruction;
1276        }
1277      } else {
1278        // We'll try to fold it later if it's profitable.
1279        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1280      }
1281    }
1282
1283    mop.setReg(NewVReg);
1284    if (mop.isImplicit())
1285      rewriteImplicitOps(li, MI, NewVReg, vrm);
1286
1287    // Reuse NewVReg for other reads.
1288    bool HasEarlyClobber = false;
1289    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1290      MachineOperand &mopj = MI->getOperand(Ops[j]);
1291      mopj.setReg(NewVReg);
1292      if (mopj.isImplicit())
1293        rewriteImplicitOps(li, MI, NewVReg, vrm);
1294      if (mopj.isEarlyClobber())
1295        HasEarlyClobber = true;
1296    }
1297
1298    if (CreatedNewVReg) {
1299      if (DefIsReMat) {
1300        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1301        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1302          // Each valnum may have its own remat id.
1303          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1304        } else {
1305          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1306        }
1307        if (!CanDelete || (HasUse && HasDef)) {
1308          // If this is a two-addr instruction then its use operands are
1309          // rematerializable but its def is not. It should be assigned a
1310          // stack slot.
1311          vrm.assignVirt2StackSlot(NewVReg, Slot);
1312        }
1313      } else {
1314        vrm.assignVirt2StackSlot(NewVReg, Slot);
1315      }
1316    } else if (HasUse && HasDef &&
1317               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1318      // If this interval hasn't been assigned a stack slot (because earlier
1319      // def is a deleted remat def), do it now.
1320      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1321      vrm.assignVirt2StackSlot(NewVReg, Slot);
1322    }
1323
1324    // Re-matting an instruction with virtual register use. Add the
1325    // register as an implicit use on the use MI.
1326    if (DefIsReMat && ImpUse)
1327      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1328
1329    // Create a new register interval for this spill / remat.
1330    LiveInterval &nI = getOrCreateInterval(NewVReg);
1331    if (CreatedNewVReg) {
1332      NewLIs.push_back(&nI);
1333      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1334      if (TrySplit)
1335        vrm.setIsSplitFromReg(NewVReg, li.reg);
1336    }
1337
1338    if (HasUse) {
1339      if (CreatedNewVReg) {
1340        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1341                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1342        DEBUG(dbgs() << " +" << LR);
1343        nI.addRange(LR);
1344      } else {
1345        // Extend the split live interval to this def / use.
1346        SlotIndex End = index.getDefIndex();
1347        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1348                     nI.getValNumInfo(nI.getNumValNums()-1));
1349        DEBUG(dbgs() << " +" << LR);
1350        nI.addRange(LR);
1351      }
1352    }
1353    if (HasDef) {
1354      // An early clobber starts at the use slot, except for an early clobber
1355      // tied to a use operand (yes, that is a thing).
1356      LiveRange LR(HasEarlyClobber && !HasUse ?
1357                   index.getUseIndex() : index.getDefIndex(),
1358                   index.getStoreIndex(),
1359                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1360      DEBUG(dbgs() << " +" << LR);
1361      nI.addRange(LR);
1362    }
1363
1364    DEBUG({
1365        dbgs() << "\t\t\t\tAdded new interval: ";
1366        nI.print(dbgs(), tri_);
1367        dbgs() << '\n';
1368      });
1369  }
1370  return CanFold;
1371}
1372bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1373                                   const VNInfo *VNI,
1374                                   MachineBasicBlock *MBB,
1375                                   SlotIndex Idx) const {
1376  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1377}
1378
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  /// RewriteInfo - A (slot index, instruction) pair recording one machine
  /// instruction that rewriteInstructionsForSpills must rewrite.
  struct RewriteInfo {
    // Slot index of MI; used as the sort key below.
    SlotIndex Index;
    // The instruction to be rewritten.
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  /// RewriteInfoCompare - Orders RewriteInfo entries by ascending slot
  /// index, so instructions are processed in program order.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1394
/// rewriteInstructionsForSpills - Rewrite all instructions touching the live
/// range [I->start, I->end) of li for spilling: collects the affected
/// instructions in index order, rewrites each via
/// rewriteInstructionForSpills, and records per-MBB spill points (SpillMBBs /
/// SpillIdxes) and restore points (RestoreMBBs / RestoreIdxes) along with
/// whether each can be folded. DBG_VALUE instructions referencing li.reg are
/// updated to reference the spill slot, or deleted when that fails.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole-instruction (base index) boundaries.
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before possibly erasing MI, which would invalidate ri.
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      // Could not retarget the DBG_VALUE at the spill slot; drop it.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only rewrite instructions inside the current live range.
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already created for this MBB if any.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing rewritten (e.g. the instruction was deleted or folded away).
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def in the block: a spill is needed here.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1625
1626bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1627                        unsigned vr, BitVector &RestoreMBBs,
1628                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1629  if (!RestoreMBBs[Id])
1630    return false;
1631  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1632  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1633    if (Restores[i].index == index &&
1634        Restores[i].vreg == vr &&
1635        Restores[i].canFold)
1636      return true;
1637  return false;
1638}
1639
1640void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1641                        unsigned vr, BitVector &RestoreMBBs,
1642                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1643  if (!RestoreMBBs[Id])
1644    return;
1645  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1646  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1647    if (Restores[i].index == index && Restores[i].vreg)
1648      Restores[i].index = SlotIndex();
1649}
1650
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses. Defs of li.reg that
/// survive rewriting must be IMPLICIT_DEFs and are deleted outright; each
/// remaining use is redirected to a fresh, implicitly-defined virtual
/// register with an empty interval, appended to NewLIs.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    // Grab the operand and instruction, then advance the iterator BEFORE any
    // mutation: setReg/eraseFromParent below would invalidate 'ri' if it
    // still pointed at this operand.
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      // Only IMPLICIT_DEF defs can remain at this point; delete the
      // instruction and drop it from the index and spill maps.
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of this instruction that still names li.reg
      // (there may be several) to the new undef vreg.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1692
1693float
1694LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1695  // Limit the loop depth ridiculousness.
1696  if (loopDepth > 200)
1697    loopDepth = 200;
1698
1699  // The loop depth is used to roughly estimate the number of times the
1700  // instruction is executed. Something like 10^d is simple, but will quickly
1701  // overflow a float. This expression behaves like 10^d for small d, but is
1702  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1703  // headroom before overflow.
1704  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1705
1706  return (isDef + isUse) * lc;
1707}
1708
1709void
1710LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1711  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1712    normalizeSpillWeight(*NewLIs[i]);
1713}
1714
/// addIntervalsForSpills - Spill the live interval 'li', rewriting all of its
/// defs and uses to newly created intervals, inserting spill and restore
/// points in the VirtRegMap, and folding memory operands where possible.
/// Returns the set of new (non-empty) intervals produced.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Splitting is only attempted when the interval spans multiple blocks.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // Decide, per value number, whether its def can be rematerialized and
  // whether the original def instruction may be deleted.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Process each block that needs a spill: try folding the store into the
  // defining instruction, otherwise record a spill point with the VirtRegMap.
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          // Collect all operand indices referencing VReg; any use beyond the
          // first foldable one defeats folding.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Process each block that needs a restore: try folding the load (or the
  // rematerialized def, if it is itself a load) into the using instruction,
  // otherwise record a restore point with the VirtRegMap.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // Entries cleared by eraseRestoreInfo carry a default SlotIndex.
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        // Mark the last use of this interval as a kill, unless it is tied to
        // a def (two-address), and record the kill point for the spiller.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
2029
2030/// hasAllocatableSuperReg - Return true if the specified physical register has
2031/// any super register that's allocatable.
2032bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2033  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2034    if (allocatableRegs_[*AS] && hasInterval(*AS))
2035      return true;
2036  return false;
2037}
2038
2039/// getRepresentativeReg - Find the largest super register of the specified
2040/// physical register.
2041unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2042  // Find the largest super-register that is allocatable.
2043  unsigned BestReg = Reg;
2044  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2045    unsigned SuperReg = *AS;
2046    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2047      BestReg = SuperReg;
2048      break;
2049    }
2050  }
2051  return BestReg;
2052}
2053
2054/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2055/// specified interval that conflicts with the specified physical register.
2056unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2057                                                   unsigned PhysReg) const {
2058  unsigned NumConflicts = 0;
2059  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2060  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2061         E = mri_->reg_end(); I != E; ++I) {
2062    MachineOperand &O = I.getOperand();
2063    MachineInstr *MI = O.getParent();
2064    if (MI->isDebugValue())
2065      continue;
2066    SlotIndex Index = getInstructionIndex(MI);
2067    if (pli.liveAt(Index))
2068      ++NumConflicts;
2069  }
2070  return NumConflicts;
2071}
2072
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval. Used as an emergency measure when register
/// allocation cannot otherwise make progress.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative (largest) super-register so all aliases are
  // covered at once.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // Collect SpillReg and all of its sub-registers that have live intervals.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);

  DEBUG({
    dbgs() << "Trying to spill:";
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
      dbgs() << ' ' << tri_->getName(PRegs[i]);
    dbgs() << '\n';
  });

  // Visit each instruction touching li.reg once (SeenMIs de-duplicates
  // instructions with multiple operands on li.reg).
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      LiveReg = true;
      // Carve the slot range covering this instruction out of the physreg's
      // interval. The range must lie within a single live range; otherwise
      // we genuinely cannot free a register here.
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
      LiveReg = true;
    }
    if (!LiveReg)
      continue;
    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
    vrm.addEmergencySpill(SpillReg, MI);
    Cut = true;
  }
  return Cut;
}
2145
2146LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2147                                                  MachineInstr* startInst) {
2148  LiveInterval& Interval = getOrCreateInterval(reg);
2149  VNInfo* VN = Interval.getNextValue(
2150    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2151    startInst, getVNInfoAllocator());
2152  VN->setHasPHIKill(true);
2153  LiveRange LR(
2154     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2155     getMBBEndIdx(startInst->getParent()), VN);
2156  Interval.addRange(LR);
2157
2158  return LR;
2159}
2160
2161