LiveIntervalAnalysis.cpp revision c9df025e33ac435adb3b3318d237c36ca7cec659
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

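// A live interval is a set of half-open slot ranges, each tagged with a
// value number, printed e.g. as %reg1024 = [20,46:0) where value #0 is
// defined at index 20 (indices here are illustrative only).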
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
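  // Each instruction index provides four consecutive slots, ordered
  // LOAD < USE < DEF < STORE; getBaseIndex() rounds down to the LOAD
  // slot, so [idx, end) covers whole instructions here.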

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index
/// is partially re-defining the specified live interval. A common case of
/// this is a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
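    // For example (indices illustrative): a def at index 20 whose only
    // kill is at index 36 yields the single intra-block range [20d,36d)
    // for value #0.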
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
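      // Each killing block then gets its own value number in the loop
      // below, anchored at the block start and marked as a PHI def rather
      // than at any real instruction.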
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [getMBBStartIdx(BB), getMBBEndIdx(BB)) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
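  // For example, a dead def at instruction index i occupies only the DEF
  // slot, i.e. the range [i.def, i.store).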
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual registers.
/// For some ordering of the machine instructions [1,N], a live interval
/// is an interval [i, j) with 1 <= i <= j < N for which the variable is
/// live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
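  // Physical registers get an effectively infinite spill weight so they
  // are never chosen for spilling; virtual registers start out at zero.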
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // In a release build, stop at the first register operand; in debug
    // builds keep scanning so the assertion above can catch violations.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
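  // MRInfo accumulates VirtRegMap::isMod / isRef bits; an instruction that
  // both reads and writes the register ends up marked isModRef.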
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg, or any rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
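    // readsWritesVirtualRegister returns a (reads, writes) pair and fills
    // Ops with the indices of all operands that reference Reg.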

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways; rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
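
/// anyKillInMBBAfterIdx - Return true if the given interval is killed in
/// MBB anywhere after the specified index.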
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB,
                                   SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
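  // Sorting by slot index lets the rewrite loop below batch together all
  // entries that refer to the same MachineInstr.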

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
1384          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1385          if (VNI)
1386            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1387        }
1388        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1389          SpillIdxes.find(MBBId);
1390        if (!HasKill) {
1391          if (SII == SpillIdxes.end()) {
1392            std::vector<SRInfo> S;
1393            S.push_back(SRInfo(index, NewVReg, true));
1394            SpillIdxes.insert(std::make_pair(MBBId, S));
1395          } else if (SII->second.back().vreg != NewVReg) {
1396            SII->second.push_back(SRInfo(index, NewVReg, true));
1397          } else if (index > SII->second.back().index) {
1398            // If there is an earlier def and this is a two-address
1399            // instruction, then it's not possible to fold the store (which
1400            // would also fold the load).
1401            SRInfo &Info = SII->second.back();
1402            Info.index = index;
1403            Info.canFold = !HasUse;
1404          }
1405          SpillMBBs.set(MBBId);
1406        } else if (SII != SpillIdxes.end() &&
1407                   SII->second.back().vreg == NewVReg &&
1408                   index > SII->second.back().index) {
1409          // There is an earlier def that's not killed (must be two-address).
1410          // The spill is no longer needed.
1411          SII->second.pop_back();
1412          if (SII->second.empty()) {
1413            SpillIdxes.erase(MBBId);
1414            SpillMBBs.reset(MBBId);
1415          }
1416        }
1417      }
1418    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

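/// alsoFoldARestore - Return true if the recorded restore of virtual
/// register vr at the given index in block Id can also be folded.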
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

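/// eraseRestoreInfo - Invalidate the recorded restore of virtual register vr
/// at the given index in block Id so that no reload is issued for it.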
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
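  // For example, approximately: d=0 gives 1, d=1 gives ~10, d=2 gives ~87,
  // and d=3 gives ~657, so the first few loop levels still weigh roughly a
  // decade apart.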
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
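  // Per-MBB record of the new vreg currently standing in for li.reg, so
  // that later uses in the same block can reuse it.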
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
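    // Determine whether the rematerialized def reads from a stack slot or
    // can otherwise be folded as a load.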
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The value is killed by a phi node, so not all of its uses can be
        // rematerialized. The def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

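  // AddedKill collects new intervals that already end in a kill at a spill
  // point; their last use need not be marked as a kill again when the
  // intervals are finalized below. Ops is scratch space for the operand
  // indices of folding candidates.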
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

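  // Issue restores: reload from the stack slot or rematerialize the def,
  // folding the load into the using instruction when possible.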
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register with a live interval, i.e. one that has
  // no allocatable super-register of its own.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if the
/// physical register's live interval was cut.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, we can't handle it yet,
    // so assert.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);

  DEBUG({
    dbgs() << "Trying to spill:";
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
      dbgs() << ' ' << tri_->getName(PRegs[i]);
    dbgs() << '\n';
  });

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
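  // Visit each instruction that uses li.reg once, carving the physical
  // register live ranges out around it and recording an emergency spill
  // point there.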
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      LiveReg = true;
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
    }
    if (!LiveReg)
      continue;
    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
    vrm.addEmergencySpill(SpillReg, MI);
    Cut = true;
  }
  return Cut;
}

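/// addLiveRangeToEndOfBlock - Extend the live interval of the given register
/// with a new value defined at startInst, covering the range from that def
/// to the end of startInst's basic block.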
1980                                                  MachineInstr* startInst) {
1981  LiveInterval& Interval = getOrCreateInterval(reg);
1982  VNInfo* VN = Interval.getNextValue(
1983    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
1984    startInst, getVNInfoAllocator());
1985  VN->setHasPHIKill(true);
1986  LiveRange LR(
1987     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
1988     getMBBEndIdx(startInst->getParent()), VN);
1989  Interval.addRange(LR);
1990
1991  return LR;
1992}
1993
1994