LiveIntervalAnalysis.cpp revision 6ffa1e38380bb76d491a690f831a897627193dee
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/Passes.h"
31#include "llvm/CodeGen/ProcessImplicitDefs.h"
32#include "llvm/Target/TargetRegisterInfo.h"
33#include "llvm/Target/TargetInstrInfo.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include "llvm/ADT/DepthFirstIterator.h"
41#include "llvm/ADT/SmallSet.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/ADT/STLExtras.h"
44#include <algorithm>
45#include <limits>
46#include <cmath>
47using namespace llvm;
48
49// Hidden options for help debugging.
50static cl::opt<bool> DisableReMat("disable-rematerialization",
51                                  cl::init(false), cl::Hidden);
52
53static cl::opt<bool> EnableFastSpilling("fast-spill",
54                                        cl::init(false), cl::Hidden);
55
56STATISTIC(numIntervals , "Number of original intervals");
57STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
58STATISTIC(numSplits    , "Number of intervals split");
59
60char LiveIntervals::ID = 0;
61static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
62
63void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64  AU.setPreservesCFG();
65  AU.addRequired<AliasAnalysis>();
66  AU.addPreserved<AliasAnalysis>();
67  AU.addPreserved<LiveVariables>();
68  AU.addRequired<LiveVariables>();
69  AU.addPreservedID(MachineLoopInfoID);
70  AU.addPreservedID(MachineDominatorsID);
71
72  if (!StrongPHIElim) {
73    AU.addPreservedID(PHIEliminationID);
74    AU.addRequiredID(PHIEliminationID);
75  }
76
77  AU.addRequiredID(TwoAddressInstructionPassID);
78  AU.addPreserved<ProcessImplicitDefs>();
79  AU.addRequired<ProcessImplicitDefs>();
80  AU.addPreserved<SlotIndexes>();
81  AU.addRequiredTransitive<SlotIndexes>();
82  MachineFunctionPass::getAnalysisUsage(AU);
83}
84
85void LiveIntervals::releaseMemory() {
86  // Free the live intervals themselves.
87  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88       E = r2iMap_.end(); I != E; ++I)
89    delete I->second;
90
91  r2iMap_.clear();
92
93  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
94  VNInfoAllocator.DestroyAll();
95  while (!CloneMIs.empty()) {
96    MachineInstr *MI = CloneMIs.back();
97    CloneMIs.pop_back();
98    mf_->DeleteMachineInstr(MI);
99  }
100}
101
102/// runOnMachineFunction - Register allocate the whole function
103///
104bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
105  mf_ = &fn;
106  mri_ = &mf_->getRegInfo();
107  tm_ = &fn.getTarget();
108  tri_ = tm_->getRegisterInfo();
109  tii_ = tm_->getInstrInfo();
110  aa_ = &getAnalysis<AliasAnalysis>();
111  lv_ = &getAnalysis<LiveVariables>();
112  indexes_ = &getAnalysis<SlotIndexes>();
113  allocatableRegs_ = tri_->getAllocatableSet(fn);
114
115  computeIntervals();
116
117  numIntervals += getNumIntervals();
118
119  DEBUG(dump());
120  return true;
121}
122
123/// print - Implement the dump method.
124void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125  OS << "********** INTERVALS **********\n";
126  for (const_iterator I = begin(), E = end(); I != E; ++I) {
127    I->second->print(OS, tri_);
128    OS << "\n";
129  }
130
131  printInstrs(OS);
132}
133
134void LiveIntervals::printInstrs(raw_ostream &OS) const {
135  OS << "********** MACHINEINSTRS **********\n";
136
137  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138       mbbi != mbbe; ++mbbi) {
139    OS << "BB#" << mbbi->getNumber()
140       << ":\t\t# derived from " << mbbi->getName() << "\n";
141    for (MachineBasicBlock::iterator mii = mbbi->begin(),
142           mie = mbbi->end(); mii != mie; ++mii) {
143      if (mii->isDebugValue())
144        OS << "    \t" << *mii;
145      else
146        OS << getInstructionIndex(mii) << '\t' << *mii;
147    }
148  }
149}
150
151void LiveIntervals::dumpInstrs() const {
152  printInstrs(dbgs());
153}
154
155bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156                                         VirtRegMap &vrm, unsigned reg) {
157  // We don't handle fancy stuff crossing basic block boundaries
158  if (li.ranges.size() != 1)
159    return true;
160  const LiveRange &range = li.ranges.front();
161  SlotIndex idx = range.start.getBaseIndex();
162  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
163
164  // Skip deleted instructions
165  MachineInstr *firstMI = getInstructionFromIndex(idx);
166  while (!firstMI && idx != end) {
167    idx = idx.getNextIndex();
168    firstMI = getInstructionFromIndex(idx);
169  }
170  if (!firstMI)
171    return false;
172
173  // Find last instruction in range
174  SlotIndex lastIdx = end.getPrevIndex();
175  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176  while (!lastMI && lastIdx != idx) {
177    lastIdx = lastIdx.getPrevIndex();
178    lastMI = getInstructionFromIndex(lastIdx);
179  }
180  if (!lastMI)
181    return false;
182
183  // Range cannot cross basic block boundaries or terminators
184  MachineBasicBlock *MBB = firstMI->getParent();
185  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
186    return true;
187
188  MachineBasicBlock::const_iterator E = lastMI;
189  ++E;
190  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191    const MachineInstr &MI = *I;
192
193    // Allow copies to and from li.reg
194    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196      if (SrcReg == li.reg || DstReg == li.reg)
197        continue;
198
199    // Check for operands using reg
200    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
201      const MachineOperand& mop = MI.getOperand(i);
202      if (!mop.isReg())
203        continue;
204      unsigned PhysReg = mop.getReg();
205      if (PhysReg == 0 || PhysReg == li.reg)
206        continue;
207      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208        if (!vrm.hasPhys(PhysReg))
209          continue;
210        PhysReg = vrm.getPhys(PhysReg);
211      }
212      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
213        return true;
214    }
215  }
216
217  // No conflicts found.
218  return false;
219}
220
221/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
222/// it checks for sub-register reference and it can check use as well.
223bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
224                                            unsigned Reg, bool CheckUse,
225                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226  for (LiveInterval::Ranges::const_iterator
227         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228    for (SlotIndex index = I->start.getBaseIndex(),
229           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
230           index != end;
231           index = index.getNextIndex()) {
232      MachineInstr *MI = getInstructionFromIndex(index);
233      if (!MI)
234        continue;               // skip deleted instructions
235
236      if (JoinedCopies.count(MI))
237        continue;
238      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239        MachineOperand& MO = MI->getOperand(i);
240        if (!MO.isReg())
241          continue;
242        if (MO.isUse() && !CheckUse)
243          continue;
244        unsigned PhysReg = MO.getReg();
245        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
246          continue;
247        if (tri_->isSubRegister(Reg, PhysReg))
248          return true;
249      }
250    }
251  }
252
253  return false;
254}
255
256#ifndef NDEBUG
257static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258  if (TargetRegisterInfo::isPhysicalRegister(reg))
259    dbgs() << tri_->getName(reg);
260  else
261    dbgs() << "%reg" << reg;
262}
263#endif
264
265static
266bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
267  unsigned Reg = MI.getOperand(MOIdx).getReg();
268  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
269    const MachineOperand &MO = MI.getOperand(i);
270    if (!MO.isReg())
271      continue;
272    if (MO.getReg() == Reg && MO.isDef()) {
273      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
274             MI.getOperand(MOIdx).getSubReg() &&
275             MO.getSubReg());
276      return true;
277    }
278  }
279  return false;
280}
281
282/// isPartialRedef - Return true if the specified def at the specific index is
283/// partially re-defining the specified live interval. A common case of this is
284/// a definition of the sub-register.
285bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
286                                   LiveInterval &interval) {
287  if (!MO.getSubReg() || MO.isEarlyClobber())
288    return false;
289
290  SlotIndex RedefIndex = MIIdx.getDefIndex();
291  const LiveRange *OldLR =
292    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
293  if (OldLR->valno->isDefAccurate()) {
294    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
295    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
296  }
297  return false;
298}
299
300void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
301                                             MachineBasicBlock::iterator mi,
302                                             SlotIndex MIIdx,
303                                             MachineOperand& MO,
304                                             unsigned MOIdx,
305                                             LiveInterval &interval) {
306  DEBUG({
307      dbgs() << "\t\tregister: ";
308      printRegName(interval.reg, tri_);
309    });
310
311  // Virtual registers may be defined multiple times (due to phi
312  // elimination and 2-addr elimination).  Much of what we do only has to be
313  // done once for the vreg.  We use an empty interval to detect the first
314  // time we see a vreg.
315  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
316  if (interval.empty()) {
317    // Get the Idx of the defining instructions.
318    SlotIndex defIndex = MIIdx.getDefIndex();
319    // Earlyclobbers move back one, so that they overlap the live range
320    // of inputs.
321    if (MO.isEarlyClobber())
322      defIndex = MIIdx.getUseIndex();
323    MachineInstr *CopyMI = NULL;
324    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
325    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
326        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
327      CopyMI = mi;
328
329    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
330                                          VNInfoAllocator);
331    assert(ValNo->id == 0 && "First value in interval is not 0?");
332
333    // Loop over all of the blocks that the vreg is defined in.  There are
334    // two cases we have to handle here.  The most common case is a vreg
335    // whose lifetime is contained within a basic block.  In this case there
336    // will be a single kill, in MBB, which comes after the definition.
337    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
338      // FIXME: what about dead vars?
339      SlotIndex killIdx;
340      if (vi.Kills[0] != mi)
341        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
342      else
343        killIdx = defIndex.getStoreIndex();
344
345      // If the kill happens after the definition, we have an intra-block
346      // live range.
347      if (killIdx > defIndex) {
348        assert(vi.AliveBlocks.empty() &&
349               "Shouldn't be alive across any blocks!");
350        LiveRange LR(defIndex, killIdx, ValNo);
351        interval.addRange(LR);
352        DEBUG(dbgs() << " +" << LR << "\n");
353        ValNo->addKill(killIdx);
354        return;
355      }
356    }
357
358    // The other case we handle is when a virtual register lives to the end
359    // of the defining block, potentially live across some blocks, then is
360    // live into some number of blocks, but gets killed.  Start by adding a
361    // range that goes from this definition to the end of the defining block.
362    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
363    DEBUG(dbgs() << " +" << NewLR);
364    interval.addRange(NewLR);
365
366    bool PHIJoin = lv_->isPHIJoin(interval.reg);
367
368    if (PHIJoin) {
369      // A phi join register is killed at the end of the MBB and revived as a new
370      // valno in the killing blocks.
371      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
372      DEBUG(dbgs() << " phi-join");
373      ValNo->addKill(indexes_->getTerminatorGap(mbb));
374      ValNo->setHasPHIKill(true);
375    } else {
376      // Iterate over all of the blocks that the variable is completely
377      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
378      // live interval.
379      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
380               E = vi.AliveBlocks.end(); I != E; ++I) {
381        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
382        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
383        interval.addRange(LR);
384        DEBUG(dbgs() << " +" << LR);
385      }
386    }
387
388    // Finally, this virtual register is live from the start of any killing
389    // block to the 'use' slot of the killing instruction.
390    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
391      MachineInstr *Kill = vi.Kills[i];
392      SlotIndex Start = getMBBStartIdx(Kill->getParent());
393      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
394
395      // Create interval with one of a NEW value number.  Note that this value
396      // number isn't actually defined by an instruction, weird huh? :)
397      if (PHIJoin) {
398        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
399                                      VNInfoAllocator);
400        ValNo->setIsPHIDef(true);
401      }
402      LiveRange LR(Start, killIdx, ValNo);
403      interval.addRange(LR);
404      ValNo->addKill(killIdx);
405      DEBUG(dbgs() << " +" << LR);
406    }
407
408  } else {
409    if (MultipleDefsBySameMI(*mi, MOIdx))
410      // Multiple defs of the same virtual register by the same instruction.
411      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
412      // This is likely due to elimination of REG_SEQUENCE instructions. Return
413      // here since there is nothing to do.
414      return;
415
416    // If this is the second time we see a virtual register definition, it
417    // must be due to phi elimination or two addr elimination.  If this is
418    // the result of two address elimination, then the vreg is one of the
419    // def-and-use register operand.
420
421    // It may also be partial redef like this:
422    // 80	%reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
423    // 120	%reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
424    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
425    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
426      // If this is a two-address definition, then we have already processed
427      // the live range.  The only problem is that we didn't realize there
428      // are actually two values in the live interval.  Because of this we
429      // need to take the LiveRegion that defines this register and split it
430      // into two values.
431      // Two-address vregs should always only be redefined once.  This means
432      // that at this point, there should be exactly one value number in it.
433      assert((PartReDef || interval.containsOneValue()) &&
434             "Unexpected 2-addr liveint!");
435      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
436      SlotIndex RedefIndex = MIIdx.getDefIndex();
437      if (MO.isEarlyClobber())
438        RedefIndex = MIIdx.getUseIndex();
439
440      const LiveRange *OldLR =
441        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
442      VNInfo *OldValNo = OldLR->valno;
443
444      // Delete the initial value, which should be short and continuous,
445      // because the 2-addr copy must be in the same MBB as the redef.
446      interval.removeRange(DefIndex, RedefIndex);
447
448      // The new value number (#1) is defined by the instruction we claimed
449      // defined value #0.
450      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
451                                            false, // update at *
452                                            VNInfoAllocator);
453      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
454
455      // Value#0 is now defined by the 2-addr instruction.
456      OldValNo->def  = RedefIndex;
457      OldValNo->setCopy(0);
458
459      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
460      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
461      if (PartReDef &&
462          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
463        OldValNo->setCopy(&*mi);
464
465      // Add the new live interval which replaces the range for the input copy.
466      LiveRange LR(DefIndex, RedefIndex, ValNo);
467      DEBUG(dbgs() << " replace range with " << LR);
468      interval.addRange(LR);
469      ValNo->addKill(RedefIndex);
470
471      // If this redefinition is dead, we need to add a dummy unit live
472      // range covering the def slot.
473      if (MO.isDead())
474        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
475                                    OldValNo));
476
477      DEBUG({
478          dbgs() << " RESULT: ";
479          interval.print(dbgs(), tri_);
480        });
481    } else if (lv_->isPHIJoin(interval.reg)) {
482      // In the case of PHI elimination, each variable definition is only
483      // live until the end of the block.  We've already taken care of the
484      // rest of the live range.
485
486      SlotIndex defIndex = MIIdx.getDefIndex();
487      if (MO.isEarlyClobber())
488        defIndex = MIIdx.getUseIndex();
489
490      VNInfo *ValNo;
491      MachineInstr *CopyMI = NULL;
492      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
493      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
494          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
495        CopyMI = mi;
496      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
497
498      SlotIndex killIndex = getMBBEndIdx(mbb);
499      LiveRange LR(defIndex, killIndex, ValNo);
500      interval.addRange(LR);
501      ValNo->addKill(indexes_->getTerminatorGap(mbb));
502      ValNo->setHasPHIKill(true);
503      DEBUG(dbgs() << " phi-join +" << LR);
504    } else {
505      llvm_unreachable("Multiply defined register");
506    }
507  }
508
509  DEBUG(dbgs() << '\n');
510}
511
512void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
513                                              MachineBasicBlock::iterator mi,
514                                              SlotIndex MIIdx,
515                                              MachineOperand& MO,
516                                              LiveInterval &interval,
517                                              MachineInstr *CopyMI) {
518  // A physical register cannot be live across basic block, so its
519  // lifetime must end somewhere in its defining basic block.
520  DEBUG({
521      dbgs() << "\t\tregister: ";
522      printRegName(interval.reg, tri_);
523    });
524
525  SlotIndex baseIndex = MIIdx;
526  SlotIndex start = baseIndex.getDefIndex();
527  // Earlyclobbers move back one.
528  if (MO.isEarlyClobber())
529    start = MIIdx.getUseIndex();
530  SlotIndex end = start;
531
532  // If it is not used after definition, it is considered dead at
533  // the instruction defining it. Hence its interval is:
534  // [defSlot(def), defSlot(def)+1)
535  // For earlyclobbers, the defSlot was pushed back one; the extra
536  // advance below compensates.
537  if (MO.isDead()) {
538    DEBUG(dbgs() << " dead");
539    end = start.getStoreIndex();
540    goto exit;
541  }
542
543  // If it is not dead on definition, it must be killed by a
544  // subsequent instruction. Hence its interval is:
545  // [defSlot(def), useSlot(kill)+1)
546  baseIndex = baseIndex.getNextIndex();
547  while (++mi != MBB->end()) {
548
549    if (mi->isDebugValue())
550      continue;
551    if (getInstructionFromIndex(baseIndex) == 0)
552      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
553
554    if (mi->killsRegister(interval.reg, tri_)) {
555      DEBUG(dbgs() << " killed");
556      end = baseIndex.getDefIndex();
557      goto exit;
558    } else {
559      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
560      if (DefIdx != -1) {
561        if (mi->isRegTiedToUseOperand(DefIdx)) {
562          // Two-address instruction.
563          end = baseIndex.getDefIndex();
564        } else {
565          // Another instruction redefines the register before it is ever read.
566          // Then the register is essentially dead at the instruction that
567          // defines it. Hence its interval is:
568          // [defSlot(def), defSlot(def)+1)
569          DEBUG(dbgs() << " dead");
570          end = start.getStoreIndex();
571        }
572        goto exit;
573      }
574    }
575
576    baseIndex = baseIndex.getNextIndex();
577  }
578
579  // The only case we should have a dead physreg here without a killing or
580  // instruction where we know it's dead is if it is live-in to the function
581  // and never used. Another possible case is the implicit use of the
582  // physical register has been deleted by two-address pass.
583  end = start.getStoreIndex();
584
585exit:
586  assert(start < end && "did not find end of interval?");
587
588  // Already exists? Extend old live interval.
589  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
590  bool Extend = OldLR != interval.end();
591  VNInfo *ValNo = Extend
592    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
593  if (MO.isEarlyClobber() && Extend)
594    ValNo->setHasRedefByEC(true);
595  LiveRange LR(start, end, ValNo);
596  interval.addRange(LR);
597  LR.valno->addKill(end);
598  DEBUG(dbgs() << " +" << LR << '\n');
599}
600
601void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
602                                      MachineBasicBlock::iterator MI,
603                                      SlotIndex MIIdx,
604                                      MachineOperand& MO,
605                                      unsigned MOIdx) {
606  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
607    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
608                             getOrCreateInterval(MO.getReg()));
609  else if (allocatableRegs_[MO.getReg()]) {
610    MachineInstr *CopyMI = NULL;
611    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
612    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
613        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
614      CopyMI = MI;
615    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
616                              getOrCreateInterval(MO.getReg()), CopyMI);
617    // Def of a register also defines its sub-registers.
618    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
619      // If MI also modifies the sub-register explicitly, avoid processing it
620      // more than once. Do not pass in TRI here so it checks for exact match.
621      if (!MI->modifiesRegister(*AS))
622        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
623                                  getOrCreateInterval(*AS), 0);
624  }
625}
626
627void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
628                                         SlotIndex MIIdx,
629                                         LiveInterval &interval, bool isAlias) {
630  DEBUG({
631      dbgs() << "\t\tlivein register: ";
632      printRegName(interval.reg, tri_);
633    });
634
635  // Look for kills, if it reaches a def before it's killed, then it shouldn't
636  // be considered a livein.
637  MachineBasicBlock::iterator mi = MBB->begin();
638  MachineBasicBlock::iterator E = MBB->end();
639  // Skip over DBG_VALUE at the start of the MBB.
640  if (mi != E && mi->isDebugValue()) {
641    while (++mi != E && mi->isDebugValue())
642      ;
643    if (mi == E)
644      // MBB is empty except for DBG_VALUE's.
645      return;
646  }
647
648  SlotIndex baseIndex = MIIdx;
649  SlotIndex start = baseIndex;
650  if (getInstructionFromIndex(baseIndex) == 0)
651    baseIndex = indexes_->getNextNonNullIndex(baseIndex);
652
653  SlotIndex end = baseIndex;
654  bool SeenDefUse = false;
655
656  while (mi != E) {
657    if (mi->killsRegister(interval.reg, tri_)) {
658      DEBUG(dbgs() << " killed");
659      end = baseIndex.getDefIndex();
660      SeenDefUse = true;
661      break;
662    } else if (mi->modifiesRegister(interval.reg, tri_)) {
663      // Another instruction redefines the register before it is ever read.
664      // Then the register is essentially dead at the instruction that defines
665      // it. Hence its interval is:
666      // [defSlot(def), defSlot(def)+1)
667      DEBUG(dbgs() << " dead");
668      end = start.getStoreIndex();
669      SeenDefUse = true;
670      break;
671    }
672
673    while (++mi != E && mi->isDebugValue())
674      // Skip over DBG_VALUE.
675      ;
676    if (mi != E)
677      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
678  }
679
680  // Live-in register might not be used at all.
681  if (!SeenDefUse) {
682    if (isAlias) {
683      DEBUG(dbgs() << " dead");
684      end = MIIdx.getStoreIndex();
685    } else {
686      DEBUG(dbgs() << " live through");
687      end = baseIndex;
688    }
689  }
690
691  VNInfo *vni =
692    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
693                          0, false, VNInfoAllocator);
694  vni->setIsPHIDef(true);
695  LiveRange LR(start, end, vni);
696
697  interval.addRange(LR);
698  LR.valno->addKill(end);
699  DEBUG(dbgs() << " +" << LR << '\n');
700}
701
702/// computeIntervals - computes the live intervals for virtual
703/// registers. for some ordering of the machine instructions [1,N] a
704/// live interval is an interval [i, j) where 1 <= i <= j < N for
705/// which a variable is live
706void LiveIntervals::computeIntervals() {
707  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
708               << "********** Function: "
709               << ((Value*)mf_->getFunction())->getName() << '\n');
710
711  SmallVector<unsigned, 8> UndefUses;
712  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
713       MBBI != E; ++MBBI) {
714    MachineBasicBlock *MBB = MBBI;
715    if (MBB->empty())
716      continue;
717
718    // Track the index of the current machine instr.
719    SlotIndex MIIndex = getMBBStartIdx(MBB);
720    DEBUG(dbgs() << "BB#" << MBB->getNumber()
721          << ":\t\t# derived from " << MBB->getName() << "\n");
722
723    // Create intervals for live-ins to this BB first.
724    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
725           LE = MBB->livein_end(); LI != LE; ++LI) {
726      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
727      // Multiple live-ins can alias the same register.
728      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
729        if (!hasInterval(*AS))
730          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
731                               true);
732    }
733
734    // Skip over empty initial indices.
735    if (getInstructionFromIndex(MIIndex) == 0)
736      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
737
738    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
739         MI != miEnd; ++MI) {
740      DEBUG(dbgs() << MIIndex << "\t" << *MI);
741      if (MI->isDebugValue())
742        continue;
743
744      // Handle defs.
745      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
746        MachineOperand &MO = MI->getOperand(i);
747        if (!MO.isReg() || !MO.getReg())
748          continue;
749
750        // handle register defs - build intervals
751        if (MO.isDef())
752          handleRegisterDef(MBB, MI, MIIndex, MO, i);
753        else if (MO.isUndef())
754          UndefUses.push_back(MO.getReg());
755      }
756
757      // Move to the next instr slot.
758      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
759    }
760  }
761
762  // Create empty intervals for registers defined by implicit_def's (except
763  // for those implicit_def that define values which are liveout of their
764  // blocks.
765  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
766    unsigned UndefReg = UndefUses[i];
767    (void)getOrCreateInterval(UndefReg);
768  }
769}
770
771LiveInterval* LiveIntervals::createInterval(unsigned reg) {
772  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
773  return new LiveInterval(reg, Weight);
774}
775
776/// dupInterval - Duplicate a live interval. The caller is responsible for
777/// managing the allocated memory.
778LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
779  LiveInterval *NewLI = createInterval(li->reg);
780  NewLI->Copy(*li, mri_, getVNInfoAllocator());
781  return NewLI;
782}
783
784/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
785/// copy field and returns the source register that defines it.
786unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
787  if (!VNI->getCopy())
788    return 0;
789
790  if (VNI->getCopy()->isExtractSubreg()) {
791    // If it's extracting out of a physical register, return the sub-register.
792    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
793    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
794      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
795      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
796      if (SrcSubReg == DstSubReg)
797        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
798        // reg1034 can still be coalesced to EDX.
799        return Reg;
800      assert(DstSubReg == 0);
801      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
802    }
803    return Reg;
804  } else if (VNI->getCopy()->isInsertSubreg() ||
805             VNI->getCopy()->isSubregToReg())
806    return VNI->getCopy()->getOperand(2).getReg();
807
808  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
809  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
810    return SrcReg;
811  llvm_unreachable("Unrecognized copy instruction!");
812  return 0;
813}
814
815//===----------------------------------------------------------------------===//
816// Register allocator hooks.
817//
818
819/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
820/// allow one) virtual register operand, then its uses are implicitly using
821/// the register. Returns the virtual register.
822unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
823                                            MachineInstr *MI) const {
824  unsigned RegOp = 0;
825  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
826    MachineOperand &MO = MI->getOperand(i);
827    if (!MO.isReg() || !MO.isUse())
828      continue;
829    unsigned Reg = MO.getReg();
830    if (Reg == 0 || Reg == li.reg)
831      continue;
832
833    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
834        !allocatableRegs_[Reg])
835      continue;
836    // FIXME: For now, only remat MI with at most one register operand.
837    assert(!RegOp &&
838           "Can't rematerialize instruction with multiple register operand!");
839    RegOp = MO.getReg();
840#ifndef NDEBUG
841    break;
842#endif
843  }
844  return RegOp;
845}
846
847/// isValNoAvailableAt - Return true if the val# of the specified interval
848/// which reaches the given instruction also reaches the specified use index.
849bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
850                                       SlotIndex UseIdx) const {
851  SlotIndex Index = getInstructionIndex(MI);
852  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
853  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
854  return UI != li.end() && UI->valno == ValNo;
855}
856
857/// isReMaterializable - Returns true if the definition MI of the specified
858/// val# of the specified interval is re-materializable.
859bool LiveIntervals::isReMaterializable(const LiveInterval &li,
860                                       const VNInfo *ValNo, MachineInstr *MI,
861                                       SmallVectorImpl<LiveInterval*> &SpillIs,
862                                       bool &isLoad) {
863  if (DisableReMat)
864    return false;
865
866  if (!tii_->isTriviallyReMaterializable(MI, aa_))
867    return false;
868
869  // Target-specific code can mark an instruction as being rematerializable
870  // if it has one virtual reg use, though it had better be something like
871  // a PIC base register which is likely to be live everywhere.
872  unsigned ImpUse = getReMatImplicitUse(li, MI);
873  if (ImpUse) {
874    const LiveInterval &ImpLi = getInterval(ImpUse);
875    for (MachineRegisterInfo::use_nodbg_iterator
876           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
877         ri != re; ++ri) {
878      MachineInstr *UseMI = &*ri;
879      SlotIndex UseIdx = getInstructionIndex(UseMI);
880      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
881        continue;
882      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
883        return false;
884    }
885
886    // If a register operand of the re-materialized instruction is going to
887    // be spilled next, then it's not legal to re-materialize this instruction.
888    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
889      if (ImpUse == SpillIs[i]->reg)
890        return false;
891  }
892  return true;
893}
894
895/// isReMaterializable - Returns true if the definition MI of the specified
896/// val# of the specified interval is re-materializable.
897bool LiveIntervals::isReMaterializable(const LiveInterval &li,
898                                       const VNInfo *ValNo, MachineInstr *MI) {
899  SmallVector<LiveInterval*, 4> Dummy1;
900  bool Dummy2;
901  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
902}
903
904/// isReMaterializable - Returns true if every definition of MI of every
905/// val# of the specified interval is re-materializable.
906bool LiveIntervals::isReMaterializable(const LiveInterval &li,
907                                       SmallVectorImpl<LiveInterval*> &SpillIs,
908                                       bool &isLoad) {
909  isLoad = false;
910  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
911       i != e; ++i) {
912    const VNInfo *VNI = *i;
913    if (VNI->isUnused())
914      continue; // Dead val#.
915    // Is the def for the val# rematerializable?
916    if (!VNI->isDefAccurate())
917      return false;
918    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
919    bool DefIsLoad = false;
920    if (!ReMatDefMI ||
921        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
922      return false;
923    isLoad |= DefIsLoad;
924  }
925  return true;
926}
927
928/// FilterFoldedOps - Filter out two-address use operands. Return
929/// true if it finds any issue with the operands that ought to prevent
930/// folding.
931static bool FilterFoldedOps(MachineInstr *MI,
932                            SmallVector<unsigned, 2> &Ops,
933                            unsigned &MRInfo,
934                            SmallVector<unsigned, 2> &FoldOps) {
935  MRInfo = 0;
936  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
937    unsigned OpIdx = Ops[i];
938    MachineOperand &MO = MI->getOperand(OpIdx);
939    // FIXME: fold subreg use.
940    if (MO.getSubReg())
941      return true;
942    if (MO.isDef())
943      MRInfo |= (unsigned)VirtRegMap::isMod;
944    else {
945      // Filter out two-address use operand(s).
946      if (MI->isRegTiedToDefOperand(OpIdx)) {
947        MRInfo = VirtRegMap::isModRef;
948        continue;
949      }
950      MRInfo |= (unsigned)VirtRegMap::isRef;
951    }
952    FoldOps.push_back(OpIdx);
953  }
954  return false;
955}
956
957
958/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
959/// slot / to reg or any rematerialized load into ith operand of specified
960/// MI. If it is successul, MI is updated with the newly created MI and
961/// returns true.
962bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
963                                         VirtRegMap &vrm, MachineInstr *DefMI,
964                                         SlotIndex InstrIdx,
965                                         SmallVector<unsigned, 2> &Ops,
966                                         bool isSS, int Slot, unsigned Reg) {
967  // If it is an implicit def instruction, just delete it.
968  if (MI->isImplicitDef()) {
969    RemoveMachineInstrFromMaps(MI);
970    vrm.RemoveMachineInstrFromMaps(MI);
971    MI->eraseFromParent();
972    ++numFolds;
973    return true;
974  }
975
976  // Filter the list of operand indexes that are to be folded. Abort if
977  // any operand will prevent folding.
978  unsigned MRInfo = 0;
979  SmallVector<unsigned, 2> FoldOps;
980  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
981    return false;
982
983  // The only time it's safe to fold into a two address instruction is when
984  // it's folding reload and spill from / into a spill stack slot.
985  if (DefMI && (MRInfo & VirtRegMap::isMod))
986    return false;
987
988  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
989                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
990  if (fmi) {
991    // Remember this instruction uses the spill slot.
992    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
993
994    // Attempt to fold the memory reference into the instruction. If
995    // we can do this, we don't need to insert spill code.
996    MachineBasicBlock &MBB = *MI->getParent();
997    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
998      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
999    vrm.transferSpillPts(MI, fmi);
1000    vrm.transferRestorePts(MI, fmi);
1001    vrm.transferEmergencySpills(MI, fmi);
1002    ReplaceMachineInstrInMaps(MI, fmi);
1003    MI = MBB.insert(MBB.erase(MI), fmi);
1004    ++numFolds;
1005    return true;
1006  }
1007  return false;
1008}
1009
1010/// canFoldMemoryOperand - Returns true if the specified load / store
1011/// folding is possible.
1012bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1013                                         SmallVector<unsigned, 2> &Ops,
1014                                         bool ReMat) const {
1015  // Filter the list of operand indexes that are to be folded. Abort if
1016  // any operand will prevent folding.
1017  unsigned MRInfo = 0;
1018  SmallVector<unsigned, 2> FoldOps;
1019  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1020    return false;
1021
1022  // It's only legal to remat for a use, not a def.
1023  if (ReMat && (MRInfo & VirtRegMap::isMod))
1024    return false;
1025
1026  return tii_->canFoldMemoryOperand(MI, FoldOps);
1027}
1028
1029bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1030  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1031
1032  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
1033
1034  if (mbb == 0)
1035    return false;
1036
1037  for (++itr; itr != li.ranges.end(); ++itr) {
1038    MachineBasicBlock *mbb2 =
1039      indexes_->getMBBCoveringRange(itr->start, itr->end);
1040
1041    if (mbb2 != mbb)
1042      return false;
1043  }
1044
1045  return true;
1046}
1047
1048/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1049/// interval on to-be re-materialized operands of MI) with new register.
1050void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1051                                       MachineInstr *MI, unsigned NewVReg,
1052                                       VirtRegMap &vrm) {
1053  // There is an implicit use. That means one of the other operand is
1054  // being remat'ed and the remat'ed instruction has li.reg as an
1055  // use operand. Make sure we rewrite that as well.
1056  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1057    MachineOperand &MO = MI->getOperand(i);
1058    if (!MO.isReg())
1059      continue;
1060    unsigned Reg = MO.getReg();
1061    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1062      continue;
1063    if (!vrm.isReMaterialized(Reg))
1064      continue;
1065    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1066    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1067    if (UseMO)
1068      UseMO->setReg(NewVReg);
1069  }
1070}
1071
1072/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1073/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1074bool LiveIntervals::
1075rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1076                 bool TrySplit, SlotIndex index, SlotIndex end,
1077                 MachineInstr *MI,
1078                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1079                 unsigned Slot, int LdSlot,
1080                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1081                 VirtRegMap &vrm,
1082                 const TargetRegisterClass* rc,
1083                 SmallVector<int, 4> &ReMatIds,
1084                 const MachineLoopInfo *loopInfo,
1085                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1086                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1087                 std::vector<LiveInterval*> &NewLIs) {
1088  bool CanFold = false;
1089 RestartInstruction:
1090  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1091    MachineOperand& mop = MI->getOperand(i);
1092    if (!mop.isReg())
1093      continue;
1094    unsigned Reg = mop.getReg();
1095    unsigned RegI = Reg;
1096    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1097      continue;
1098    if (Reg != li.reg)
1099      continue;
1100
1101    bool TryFold = !DefIsReMat;
1102    bool FoldSS = true; // Default behavior unless it's a remat.
1103    int FoldSlot = Slot;
1104    if (DefIsReMat) {
1105      // If this is the rematerializable definition MI itself and
1106      // all of its uses are rematerialized, simply delete it.
1107      if (MI == ReMatOrigDefMI && CanDelete) {
1108        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1109                     << *MI << '\n');
1110        RemoveMachineInstrFromMaps(MI);
1111        vrm.RemoveMachineInstrFromMaps(MI);
1112        MI->eraseFromParent();
1113        break;
1114      }
1115
1116      // If def for this use can't be rematerialized, then try folding.
1117      // If def is rematerializable and it's a load, also try folding.
1118      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1119      if (isLoad) {
1120        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1121        FoldSS = isLoadSS;
1122        FoldSlot = LdSlot;
1123      }
1124    }
1125
1126    // Scan all of the operands of this instruction rewriting operands
1127    // to use NewVReg instead of li.reg as appropriate.  We do this for
1128    // two reasons:
1129    //
1130    //   1. If the instr reads the same spilled vreg multiple times, we
1131    //      want to reuse the NewVReg.
1132    //   2. If the instr is a two-addr instruction, we are required to
1133    //      keep the src/dst regs pinned.
1134    //
1135    // Keep track of whether we replace a use and/or def so that we can
1136    // create the spill interval with the appropriate range.
1137
1138    HasUse = mop.isUse();
1139    HasDef = mop.isDef();
1140    SmallVector<unsigned, 2> Ops;
1141    Ops.push_back(i);
1142    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1143      const MachineOperand &MOj = MI->getOperand(j);
1144      if (!MOj.isReg())
1145        continue;
1146      unsigned RegJ = MOj.getReg();
1147      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1148        continue;
1149      if (RegJ == RegI) {
1150        Ops.push_back(j);
1151        if (!MOj.isUndef()) {
1152          HasUse |= MOj.isUse();
1153          HasDef |= MOj.isDef();
1154        }
1155      }
1156    }
1157
1158    // Create a new virtual register for the spill interval.
1159    // Create the new register now so we can map the fold instruction
1160    // to the new register so when it is unfolded we get the correct
1161    // answer.
1162    bool CreatedNewVReg = false;
1163    if (NewVReg == 0) {
1164      NewVReg = mri_->createVirtualRegister(rc);
1165      vrm.grow();
1166      CreatedNewVReg = true;
1167
1168      // The new virtual register should get the same allocation hints as the
1169      // old one.
1170      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1171      if (Hint.first || Hint.second)
1172        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1173    }
1174
1175    if (!TryFold)
1176      CanFold = false;
1177    else {
1178      // Do not fold load / store here if we are splitting. We'll find an
1179      // optimal point to insert a load / store later.
1180      if (!TrySplit) {
1181        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1182                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1183          // Folding the load/store can completely change the instruction in
1184          // unpredictable ways, rescan it from the beginning.
1185
1186          if (FoldSS) {
1187            // We need to give the new vreg the same stack slot as the
1188            // spilled interval.
1189            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1190          }
1191
1192          HasUse = false;
1193          HasDef = false;
1194          CanFold = false;
1195          if (isNotInMIMap(MI))
1196            break;
1197          goto RestartInstruction;
1198        }
1199      } else {
1200        // We'll try to fold it later if it's profitable.
1201        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1202      }
1203    }
1204
1205    mop.setReg(NewVReg);
1206    if (mop.isImplicit())
1207      rewriteImplicitOps(li, MI, NewVReg, vrm);
1208
1209    // Reuse NewVReg for other reads.
1210    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1211      MachineOperand &mopj = MI->getOperand(Ops[j]);
1212      mopj.setReg(NewVReg);
1213      if (mopj.isImplicit())
1214        rewriteImplicitOps(li, MI, NewVReg, vrm);
1215    }
1216
1217    if (CreatedNewVReg) {
1218      if (DefIsReMat) {
1219        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1220        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1221          // Each valnum may have its own remat id.
1222          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1223        } else {
1224          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1225        }
1226        if (!CanDelete || (HasUse && HasDef)) {
1227          // If this is a two-addr instruction then its use operands are
1228          // rematerializable but its def is not. It should be assigned a
1229          // stack slot.
1230          vrm.assignVirt2StackSlot(NewVReg, Slot);
1231        }
1232      } else {
1233        vrm.assignVirt2StackSlot(NewVReg, Slot);
1234      }
1235    } else if (HasUse && HasDef &&
1236               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1237      // If this interval hasn't been assigned a stack slot (because earlier
1238      // def is a deleted remat def), do it now.
1239      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1240      vrm.assignVirt2StackSlot(NewVReg, Slot);
1241    }
1242
1243    // Re-matting an instruction with virtual register use. Add the
1244    // register as an implicit use on the use MI.
1245    if (DefIsReMat && ImpUse)
1246      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1247
1248    // Create a new register interval for this spill / remat.
1249    LiveInterval &nI = getOrCreateInterval(NewVReg);
1250    if (CreatedNewVReg) {
1251      NewLIs.push_back(&nI);
1252      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1253      if (TrySplit)
1254        vrm.setIsSplitFromReg(NewVReg, li.reg);
1255    }
1256
1257    if (HasUse) {
1258      if (CreatedNewVReg) {
1259        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1260                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1261        DEBUG(dbgs() << " +" << LR);
1262        nI.addRange(LR);
1263      } else {
1264        // Extend the split live interval to this def / use.
1265        SlotIndex End = index.getDefIndex();
1266        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1267                     nI.getValNumInfo(nI.getNumValNums()-1));
1268        DEBUG(dbgs() << " +" << LR);
1269        nI.addRange(LR);
1270      }
1271    }
1272    if (HasDef) {
1273      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1274                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1275      DEBUG(dbgs() << " +" << LR);
1276      nI.addRange(LR);
1277    }
1278
1279    DEBUG({
1280        dbgs() << "\t\t\t\tAdded new interval: ";
1281        nI.print(dbgs(), tri_);
1282        dbgs() << '\n';
1283      });
1284  }
1285  return CanFold;
1286}
1287bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1288                                   const VNInfo *VNI,
1289                                   MachineBasicBlock *MBB,
1290                                   SlotIndex Idx) const {
1291  SlotIndex End = getMBBEndIdx(MBB);
1292  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1293    if (VNI->kills[j].isPHI())
1294      continue;
1295
1296    SlotIndex KillIdx = VNI->kills[j];
1297    if (KillIdx > Idx && KillIdx <= End)
1298      return true;
1299  }
1300  return false;
1301}
1302
1303/// RewriteInfo - Keep track of machine instrs that will be rewritten
1304/// during spilling.
1305namespace {
1306  struct RewriteInfo {
1307    SlotIndex Index;
1308    MachineInstr *MI;
1309    bool HasUse;
1310    bool HasDef;
1311    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1312      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1313  };
1314
1315  struct RewriteInfoCompare {
1316    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1317      return LHS.Index < RHS.Index;
1318    }
1319  };
1320}
1321
1322void LiveIntervals::
1323rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1324                    LiveInterval::Ranges::const_iterator &I,
1325                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1326                    unsigned Slot, int LdSlot,
1327                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1328                    VirtRegMap &vrm,
1329                    const TargetRegisterClass* rc,
1330                    SmallVector<int, 4> &ReMatIds,
1331                    const MachineLoopInfo *loopInfo,
1332                    BitVector &SpillMBBs,
1333                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1334                    BitVector &RestoreMBBs,
1335                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1336                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1337                    std::vector<LiveInterval*> &NewLIs) {
1338  bool AllCanFold = true;
1339  unsigned NewVReg = 0;
1340  SlotIndex start = I->start.getBaseIndex();
1341  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1342
1343  // First collect all the def / use in this live range that will be rewritten.
1344  // Make sure they are sorted according to instruction index.
1345  std::vector<RewriteInfo> RewriteMIs;
1346  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1347         re = mri_->reg_end(); ri != re; ) {
1348    MachineInstr *MI = &*ri;
1349    MachineOperand &O = ri.getOperand();
1350    ++ri;
1351    if (MI->isDebugValue()) {
1352      // Modify DBG_VALUE now that the value is in a spill slot.
1353      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1354        uint64_t Offset = MI->getOperand(1).getImm();
1355        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1356        DebugLoc DL = MI->getDebugLoc();
1357        int FI = isLoadSS ? LdSlot : (int)Slot;
1358        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1359                                                           Offset, MDPtr, DL)) {
1360          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1361          ReplaceMachineInstrInMaps(MI, NewDV);
1362          MachineBasicBlock *MBB = MI->getParent();
1363          MBB->insert(MBB->erase(MI), NewDV);
1364          continue;
1365        }
1366      }
1367
1368      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1369      RemoveMachineInstrFromMaps(MI);
1370      vrm.RemoveMachineInstrFromMaps(MI);
1371      MI->eraseFromParent();
1372      continue;
1373    }
1374    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1375    SlotIndex index = getInstructionIndex(MI);
1376    if (index < start || index >= end)
1377      continue;
1378
1379    if (O.isUndef())
1380      // Must be defined by an implicit def. It should not be spilled. Note,
1381      // this is for correctness reason. e.g.
1382      // 8   %reg1024<def> = IMPLICIT_DEF
1383      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1384      // The live range [12, 14) are not part of the r1024 live interval since
1385      // it's defined by an implicit def. It will not conflicts with live
1386      // interval of r1025. Now suppose both registers are spilled, you can
1387      // easily see a situation where both registers are reloaded before
1388      // the INSERT_SUBREG and both target registers that would overlap.
1389      continue;
1390    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1391  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-materializing an instruction with a virtual register use. Prevent
      // the interval from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

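/// alsoFoldARestore - Return true if the restore of vr recorded at the given
/// index in block Id is still marked foldable, i.e. a store folded at this
/// point may also fold away the reload.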
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

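/// eraseRestoreInfo - Invalidate the restore of vr recorded at the given
/// index in block Id by clearing its index; the restore emission loop below
/// skips entries whose index has been reset to a default SlotIndex.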
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

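/// getSpillWeight - Compute an estimate of the spill cost contributed by a
/// def and/or use at the given loop depth; deeper loops get exponentially
/// larger weights.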
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
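  // Rough growth of lc(d) = (1 + 100/(d+10))^d (values approximate):
  // lc(0) = 1, lc(1) ~ 10.1, lc(2) ~ 87, lc(3) ~ 657, i.e. close to 10^d
  // for small d, while lc(200) ~ 6.7e33 stays well below FLT_MAX (~3.4e38).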
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

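/// normalizeSpillWeights - Normalize the spill weights of the newly created
/// intervals.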
void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

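/// addIntervalsForSpillsFast - The fast spilling path: give every instruction
/// that reads or writes li its own short-lived vreg (all sharing one stack
/// slot) after first trying to fold the memory operand directly.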
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

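/// addIntervalsForSpills - Create new live intervals by spilling li, trying
/// rematerialization and folding first. If li spans multiple basic blocks it
/// is split per block. Returns the newly created intervals.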
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

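  // Bookkeeping filled in by rewriteInstructionsForSpills: SpillIdxes /
  // RestoreIdxes record, per basic block number, where stores / reloads may
  // be needed and whether they can be folded; MBBVRegsMap remembers the vreg
  // currently in use for the interval within each block.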
  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-materializing an instruction with a virtual register use.
              // Add the register as an implicit use on the use MI and mark
              // the register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

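/// addLiveRangeToEndOfBlock - Give reg a live range starting at startInst's
/// def and extending to the end of its basic block, marking the value as
/// killed by a PHI at the block's terminator gap.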
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}