// LiveIntervalAnalysis.cpp revision 623d3c10e12a4227926e5b69947f7f0fa7e1908a
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/Passes.h"
31#include "llvm/CodeGen/ProcessImplicitDefs.h"
32#include "llvm/Target/TargetRegisterInfo.h"
33#include "llvm/Target/TargetInstrInfo.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include "llvm/ADT/DepthFirstIterator.h"
41#include "llvm/ADT/SmallSet.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/ADT/STLExtras.h"
44#include <algorithm>
45#include <limits>
46#include <cmath>
47using namespace llvm;
48
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Pass identification and static registration with the pass registry.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
62
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // Computing live intervals never adds or removes basic blocks.
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  // The normal PHI elimination pass is only a prerequisite when the strong
  // PHI elimination path is disabled (NOTE(review): presumably StrongPHIElim
  // performs its own PHI lowering elsewhere — confirm).
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive requirement: passes that use LiveIntervals also need the
  // SlotIndexes analysis to remain alive for as long as this pass is.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
84
/// releaseMemory - Drop all per-function state owned by this analysis.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  // Machine instructions accumulated in CloneMIs are owned by this pass;
  // hand them back to the MachineFunction for deletion.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
101
/// runOnMachineFunction - Compute live intervals for all virtual and
/// allocatable physical registers in the function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, target hooks, and required analyses for use by the
  // rest of the pass.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
122
123/// print - Implement the dump method.
124void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125  OS << "********** INTERVALS **********\n";
126  for (const_iterator I = begin(), E = end(); I != E; ++I) {
127    I->second->print(OS, tri_);
128    OS << "\n";
129  }
130
131  printInstrs(OS);
132}
133
134void LiveIntervals::printInstrs(raw_ostream &OS) const {
135  OS << "********** MACHINEINSTRS **********\n";
136
137  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138       mbbi != mbbe; ++mbbi) {
139    OS << "BB#" << mbbi->getNumber()
140       << ":\t\t# derived from " << mbbi->getName() << "\n";
141    for (MachineBasicBlock::iterator mii = mbbi->begin(),
142           mie = mbbi->end(); mii != mie; ++mii) {
143      if (mii->isDebugValue())
144        OS << "    \t" << *mii;
145      else
146        OS << getInstructionIndex(mii) << '\t' << *mii;
147    }
148  }
149}
150
/// dumpInstrs - Print all machine instructions to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
154
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// to the interval 'li' may conflict with another operand in the range.
/// Conservatively returns true for any interval with more than one range or
/// one that crosses a terminator / basic block boundary.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  // Round the range end up to the next base index so the scan below covers
  // the instruction containing range.end.
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  // The whole range maps to deleted instructions: nothing can conflict.
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Walk every instruction in [firstMI, lastMI].
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // A virtual register only conflicts through the physical register
        // it has already been assigned, if any.
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
220
/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except
/// it checks for sub-register reference and it can check use as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Scan every instruction index covered by every range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      // Copies already subsumed by coalescing cannot introduce a conflict.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        // Uses are only considered when the caller asked for them.
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        // A reference to any sub-register of Reg is a conflict.
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
255
256#ifndef NDEBUG
257static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258  if (TargetRegisterInfo::isPhysicalRegister(reg))
259    dbgs() << tri_->getName(reg);
260  else
261    dbgs() << "%reg" << reg;
262}
263#endif
264
265static
266bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
267  unsigned Reg = MI.getOperand(MOIdx).getReg();
268  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
269    const MachineOperand &MO = MI.getOperand(i);
270    if (!MO.isReg())
271      continue;
272    if (MO.getReg() == Reg && MO.isDef()) {
273      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
274             MI.getOperand(MOIdx).getSubReg() &&
275             MO.getSubReg());
276      return true;
277    }
278  }
279  return false;
280}
281
282/// isPartialRedef - Return true if the specified def at the specific index is
283/// partially re-defining the specified live interval. A common case of this is
284/// a definition of the sub-register.
285bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
286                                   LiveInterval &interval) {
287  if (!MO.getSubReg() || MO.isEarlyClobber())
288    return false;
289
290  SlotIndex RedefIndex = MIIdx.getDefIndex();
291  const LiveRange *OldLR =
292    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
293  if (OldLR->valno->isDefAccurate()) {
294    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
295    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
296  }
297  return false;
298}
299
/// handleVirtualRegisterDef - Extend 'interval' for the virtual register
/// defined by operand MOIdx of 'mi' at index MIIdx.  Handles the first def
/// of a vreg, two-address and partial (sub-register) redefs, and phi-join
/// registers.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Remember copy-like instructions on the value number so the coalescer
    // can recover the copy source later.
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // The def is its own kill: a dead def covering only the def slot.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80	%reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120	%reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert((PartReDef || interval.containsOneValue()) &&
             "Unexpected 2-addr liveint!");
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
505
/// handlePhysicalRegisterDef - Add a live range for a physical register def
/// at MIIdx.  The range ends within MBB at the kill or redefining
/// instruction, or covers just the def slot when the def is dead.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // DBG_VALUE instructions have no slot index; don't advance past them.
    if (mi->isDebugValue())
      continue;
    // Skip over indices whose instructions have been deleted.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  // An extension caused by an earlyclobber def marks the value as redefined
  // by an earlyclobber.
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
594
595void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
596                                      MachineBasicBlock::iterator MI,
597                                      SlotIndex MIIdx,
598                                      MachineOperand& MO,
599                                      unsigned MOIdx) {
600  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
601    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
602                             getOrCreateInterval(MO.getReg()));
603  else if (allocatableRegs_[MO.getReg()]) {
604    MachineInstr *CopyMI = NULL;
605    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
606    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
607        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
608      CopyMI = MI;
609    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
610                              getOrCreateInterval(MO.getReg()), CopyMI);
611    // Def of a register also defines its sub-registers.
612    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
613      // If MI also modifies the sub-register explicitly, avoid processing it
614      // more than once. Do not pass in TRI here so it checks for exact match.
615      if (!MI->modifiesRegister(*AS))
616        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
617                                  getOrCreateInterval(*AS), 0);
618  }
619}
620
/// handleLiveInRegister - Create a live range for a register live-in to MBB.
/// The range starts at the block entry with a PHI-def style value number and
/// ends at the first kill or redefinition; a live-in with no use or def in
/// the block is treated as live through (or dead at entry when isAlias).
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  // Advance past indices whose instructions have been deleted.
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // An alias live-in with no use is dead at the entry index.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The live-in value is not defined by any instruction in this block, so
  // give it a PHI-def style value number anchored at the block start.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
695
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUE instructions have no slot index and define nothing.
      if (MI->isDebugValue())
        continue;

      // Handle defs.  Iterate backwards so operand removal-style processing
      // inside the handlers cannot skip operands.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
764
765LiveInterval* LiveIntervals::createInterval(unsigned reg) {
766  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
767  return new LiveInterval(reg, Weight);
768}
769
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  // Allocate a fresh interval for the same register, then copy over ranges
  // and value numbers using this pass's VNInfo allocator.
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
777
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  // A value with no recorded copy has no source register.
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    // For INSERT_SUBREG / SUBREG_TO_REG the inserted value is operand 2.
    return VNI->getCopy()->getOperand(2).getReg();

  // Plain register move: ask the target to decompose it.
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
808
809//===----------------------------------------------------------------------===//
810// Register allocator hooks.
811//
812
813/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
814/// allow one) virtual register operand, then its uses are implicitly using
815/// the register. Returns the virtual register.
816unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
817                                            MachineInstr *MI) const {
818  unsigned RegOp = 0;
819  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
820    MachineOperand &MO = MI->getOperand(i);
821    if (!MO.isReg() || !MO.isUse())
822      continue;
823    unsigned Reg = MO.getReg();
824    if (Reg == 0 || Reg == li.reg)
825      continue;
826
827    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
828        !allocatableRegs_[Reg])
829      continue;
830    // FIXME: For now, only remat MI with at most one register operand.
831    assert(!RegOp &&
832           "Can't rematerialize instruction with multiple register operand!");
833    RegOp = MO.getReg();
834#ifndef NDEBUG
835    break;
836#endif
837  }
838  return RegOp;
839}
840
841/// isValNoAvailableAt - Return true if the val# of the specified interval
842/// which reaches the given instruction also reaches the specified use index.
843bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
844                                       SlotIndex UseIdx) const {
845  SlotIndex Index = getInstructionIndex(MI);
846  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
847  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
848  return UI != li.end() && UI->valno == ValNo;
849}
850
851/// isReMaterializable - Returns true if the definition MI of the specified
852/// val# of the specified interval is re-materializable.
853bool LiveIntervals::isReMaterializable(const LiveInterval &li,
854                                       const VNInfo *ValNo, MachineInstr *MI,
855                                       SmallVectorImpl<LiveInterval*> &SpillIs,
856                                       bool &isLoad) {
857  if (DisableReMat)
858    return false;
859
860  if (!tii_->isTriviallyReMaterializable(MI, aa_))
861    return false;
862
863  // Target-specific code can mark an instruction as being rematerializable
864  // if it has one virtual reg use, though it had better be something like
865  // a PIC base register which is likely to be live everywhere.
866  unsigned ImpUse = getReMatImplicitUse(li, MI);
867  if (ImpUse) {
868    const LiveInterval &ImpLi = getInterval(ImpUse);
869    for (MachineRegisterInfo::use_nodbg_iterator
870           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
871         ri != re; ++ri) {
872      MachineInstr *UseMI = &*ri;
873      SlotIndex UseIdx = getInstructionIndex(UseMI);
874      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
875        continue;
876      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
877        return false;
878    }
879
880    // If a register operand of the re-materialized instruction is going to
881    // be spilled next, then it's not legal to re-materialize this instruction.
882    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
883      if (ImpUse == SpillIs[i]->reg)
884        return false;
885  }
886  return true;
887}
888
889/// isReMaterializable - Returns true if the definition MI of the specified
890/// val# of the specified interval is re-materializable.
891bool LiveIntervals::isReMaterializable(const LiveInterval &li,
892                                       const VNInfo *ValNo, MachineInstr *MI) {
893  SmallVector<LiveInterval*, 4> Dummy1;
894  bool Dummy2;
895  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
896}
897
898/// isReMaterializable - Returns true if every definition of MI of every
899/// val# of the specified interval is re-materializable.
900bool LiveIntervals::isReMaterializable(const LiveInterval &li,
901                                       SmallVectorImpl<LiveInterval*> &SpillIs,
902                                       bool &isLoad) {
903  isLoad = false;
904  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
905       i != e; ++i) {
906    const VNInfo *VNI = *i;
907    if (VNI->isUnused())
908      continue; // Dead val#.
909    // Is the def for the val# rematerializable?
910    if (!VNI->isDefAccurate())
911      return false;
912    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
913    bool DefIsLoad = false;
914    if (!ReMatDefMI ||
915        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
916      return false;
917    isLoad |= DefIsLoad;
918  }
919  return true;
920}
921
922/// FilterFoldedOps - Filter out two-address use operands. Return
923/// true if it finds any issue with the operands that ought to prevent
924/// folding.
925static bool FilterFoldedOps(MachineInstr *MI,
926                            SmallVector<unsigned, 2> &Ops,
927                            unsigned &MRInfo,
928                            SmallVector<unsigned, 2> &FoldOps) {
929  MRInfo = 0;
930  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
931    unsigned OpIdx = Ops[i];
932    MachineOperand &MO = MI->getOperand(OpIdx);
933    // FIXME: fold subreg use.
934    if (MO.getSubReg())
935      return true;
936    if (MO.isDef())
937      MRInfo |= (unsigned)VirtRegMap::isMod;
938    else {
939      // Filter out two-address use operand(s).
940      if (MI->isRegTiedToDefOperand(OpIdx)) {
941        MRInfo = VirtRegMap::isModRef;
942        continue;
943      }
944      MRInfo |= (unsigned)VirtRegMap::isRef;
945    }
946    FoldOps.push_back(OpIdx);
947  }
948  return false;
949}
950
951
952/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
953/// slot / to reg or any rematerialized load into ith operand of specified
954/// MI. If it is successul, MI is updated with the newly created MI and
955/// returns true.
956bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
957                                         VirtRegMap &vrm, MachineInstr *DefMI,
958                                         SlotIndex InstrIdx,
959                                         SmallVector<unsigned, 2> &Ops,
960                                         bool isSS, int Slot, unsigned Reg) {
961  // If it is an implicit def instruction, just delete it.
962  if (MI->isImplicitDef()) {
963    RemoveMachineInstrFromMaps(MI);
964    vrm.RemoveMachineInstrFromMaps(MI);
965    MI->eraseFromParent();
966    ++numFolds;
967    return true;
968  }
969
970  // Filter the list of operand indexes that are to be folded. Abort if
971  // any operand will prevent folding.
972  unsigned MRInfo = 0;
973  SmallVector<unsigned, 2> FoldOps;
974  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
975    return false;
976
977  // The only time it's safe to fold into a two address instruction is when
978  // it's folding reload and spill from / into a spill stack slot.
979  if (DefMI && (MRInfo & VirtRegMap::isMod))
980    return false;
981
982  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
983                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
984  if (fmi) {
985    // Remember this instruction uses the spill slot.
986    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
987
988    // Attempt to fold the memory reference into the instruction. If
989    // we can do this, we don't need to insert spill code.
990    MachineBasicBlock &MBB = *MI->getParent();
991    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
992      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
993    vrm.transferSpillPts(MI, fmi);
994    vrm.transferRestorePts(MI, fmi);
995    vrm.transferEmergencySpills(MI, fmi);
996    ReplaceMachineInstrInMaps(MI, fmi);
997    MI = MBB.insert(MBB.erase(MI), fmi);
998    ++numFolds;
999    return true;
1000  }
1001  return false;
1002}
1003
1004/// canFoldMemoryOperand - Returns true if the specified load / store
1005/// folding is possible.
1006bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1007                                         SmallVector<unsigned, 2> &Ops,
1008                                         bool ReMat) const {
1009  // Filter the list of operand indexes that are to be folded. Abort if
1010  // any operand will prevent folding.
1011  unsigned MRInfo = 0;
1012  SmallVector<unsigned, 2> FoldOps;
1013  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1014    return false;
1015
1016  // It's only legal to remat for a use, not a def.
1017  if (ReMat && (MRInfo & VirtRegMap::isMod))
1018    return false;
1019
1020  return tii_->canFoldMemoryOperand(MI, FoldOps);
1021}
1022
1023bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1024  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1025
1026  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
1027
1028  if (mbb == 0)
1029    return false;
1030
1031  for (++itr; itr != li.ranges.end(); ++itr) {
1032    MachineBasicBlock *mbb2 =
1033      indexes_->getMBBCoveringRange(itr->start, itr->end);
1034
1035    if (mbb2 != mbb)
1036      return false;
1037  }
1038
1039  return true;
1040}
1041
1042/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1043/// interval on to-be re-materialized operands of MI) with new register.
1044void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1045                                       MachineInstr *MI, unsigned NewVReg,
1046                                       VirtRegMap &vrm) {
1047  // There is an implicit use. That means one of the other operand is
1048  // being remat'ed and the remat'ed instruction has li.reg as an
1049  // use operand. Make sure we rewrite that as well.
1050  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1051    MachineOperand &MO = MI->getOperand(i);
1052    if (!MO.isReg())
1053      continue;
1054    unsigned Reg = MO.getReg();
1055    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1056      continue;
1057    if (!vrm.isReMaterialized(Reg))
1058      continue;
1059    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1060    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1061    if (UseMO)
1062      UseMO->setReg(NewVReg);
1063  }
1064}
1065
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites each operand of MI that names li.reg to use NewVReg instead,
/// creating NewVReg (plus its stack slot / remat-id assignments in vrm) on
/// demand, attempting to fold a load / store into MI when not splitting,
/// and adding live ranges for the rewritten def and/or use to the new
/// interval. Returns true if every rewritten operand could be folded.
/// NewVReg, HasDef and HasUse are in/out parameters for the caller.
/// Note: the 'end' and 'loopInfo' parameters are not used by this routine.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  // A successful fold replaces MI with a new instruction; we jump back here
  // and rescan the new operand list from the beginning.
 RestartInstruction:
  // getNumOperands() is re-read each iteration because an implicit-use
  // operand may be appended to MI below.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    // Only operands naming the register being spilled / split are rewritten.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    // Collect every later operand of MI that names the same register.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        // <undef> operands don't count as real uses or defs.
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // MI may have been erased outright by the fold (implicit_def
          // case); if it is no longer in the index maps, stop scanning.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the reload lives from the load slot to the def
        // slot of this instruction's index.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def lives until the store slot where it will be spilled back.
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
1281bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1282                                   const VNInfo *VNI,
1283                                   MachineBasicBlock *MBB,
1284                                   SlotIndex Idx) const {
1285  SlotIndex End = getMBBEndIdx(MBB);
1286  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1287    if (VNI->kills[j].isPHI())
1288      continue;
1289
1290    SlotIndex KillIdx = VNI->kills[j];
1291    if (KillIdx > Idx && KillIdx <= End)
1292      return true;
1293  }
1294  return false;
1295}
1296
1297/// RewriteInfo - Keep track of machine instrs that will be rewritten
1298/// during spilling.
1299namespace {
1300  struct RewriteInfo {
1301    SlotIndex Index;
1302    MachineInstr *MI;
1303    bool HasUse;
1304    bool HasDef;
1305    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1306      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1307  };
1308
1309  struct RewriteInfoCompare {
1310    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1311      return LHS.Index < RHS.Index;
1312    }
1313  };
1314}
1315
1316void LiveIntervals::
1317rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1318                    LiveInterval::Ranges::const_iterator &I,
1319                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1320                    unsigned Slot, int LdSlot,
1321                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1322                    VirtRegMap &vrm,
1323                    const TargetRegisterClass* rc,
1324                    SmallVector<int, 4> &ReMatIds,
1325                    const MachineLoopInfo *loopInfo,
1326                    BitVector &SpillMBBs,
1327                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1328                    BitVector &RestoreMBBs,
1329                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1330                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1331                    std::vector<LiveInterval*> &NewLIs) {
1332  bool AllCanFold = true;
1333  unsigned NewVReg = 0;
1334  SlotIndex start = I->start.getBaseIndex();
1335  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1336
1337  // First collect all the def / use in this live range that will be rewritten.
1338  // Make sure they are sorted according to instruction index.
1339  std::vector<RewriteInfo> RewriteMIs;
1340  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1341         re = mri_->reg_end(); ri != re; ) {
1342    MachineInstr *MI = &*ri;
1343    MachineOperand &O = ri.getOperand();
1344    ++ri;
1345    if (MI->isDebugValue()) {
1346      // Modify DBG_VALUE now that the value is in a spill slot.
1347      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1348        uint64_t Offset = MI->getOperand(1).getImm();
1349        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1350        DebugLoc DL = MI->getDebugLoc();
1351        int FI = isLoadSS ? LdSlot : (int)Slot;
1352        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1353                                                           Offset, MDPtr, DL)) {
1354          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1355          ReplaceMachineInstrInMaps(MI, NewDV);
1356          MachineBasicBlock *MBB = MI->getParent();
1357          MBB->insert(MBB->erase(MI), NewDV);
1358          continue;
1359        }
1360      }
1361
1362      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1363      RemoveMachineInstrFromMaps(MI);
1364      vrm.RemoveMachineInstrFromMaps(MI);
1365      MI->eraseFromParent();
1366      continue;
1367    }
1368    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1369    SlotIndex index = getInstructionIndex(MI);
1370    if (index < start || index >= end)
1371      continue;
1372
1373    if (O.isUndef())
1374      // Must be defined by an implicit def. It should not be spilled. Note,
1375      // this is for correctness reason. e.g.
1376      // 8   %reg1024<def> = IMPLICIT_DEF
1377      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1378      // The live range [12, 14) are not part of the r1024 live interval since
1379      // it's defined by an implicit def. It will not conflicts with live
1380      // interval of r1025. Now suppose both registers are spilled, you can
1381      // easily see a situation where both registers are reloaded before
1382      // the INSERT_SUBREG and both target registers that would overlap.
1383      continue;
1384    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1385  }
1386  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1387
1388  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1389  // Now rewrite the defs and uses.
1390  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1391    RewriteInfo &rwi = RewriteMIs[i];
1392    ++i;
1393    SlotIndex index = rwi.Index;
1394    bool MIHasUse = rwi.HasUse;
1395    bool MIHasDef = rwi.HasDef;
1396    MachineInstr *MI = rwi.MI;
1397    // If MI def and/or use the same register multiple times, then there
1398    // are multiple entries.
1399    unsigned NumUses = MIHasUse;
1400    while (i != e && RewriteMIs[i].MI == MI) {
1401      assert(RewriteMIs[i].Index == index);
1402      bool isUse = RewriteMIs[i].HasUse;
1403      if (isUse) ++NumUses;
1404      MIHasUse |= isUse;
1405      MIHasDef |= RewriteMIs[i].HasDef;
1406      ++i;
1407    }
1408    MachineBasicBlock *MBB = MI->getParent();
1409
1410    if (ImpUse && MI != ReMatDefMI) {
1411      // Re-matting an instruction with virtual register use. Prevent interval
1412      // from being spilled.
1413      getInterval(ImpUse).markNotSpillable();
1414    }
1415
1416    unsigned MBBId = MBB->getNumber();
1417    unsigned ThisVReg = 0;
1418    if (TrySplit) {
1419      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1420      if (NVI != MBBVRegsMap.end()) {
1421        ThisVReg = NVI->second;
1422        // One common case:
1423        // x = use
1424        // ...
1425        // ...
1426        // def = ...
1427        //     = use
1428        // It's better to start a new interval to avoid artifically
1429        // extend the new interval.
1430        if (MIHasDef && !MIHasUse) {
1431          MBBVRegsMap.erase(MBB->getNumber());
1432          ThisVReg = 0;
1433        }
1434      }
1435    }
1436
1437    bool IsNew = ThisVReg == 0;
1438    if (IsNew) {
1439      // This ends the previous live interval. If all of its def / use
1440      // can be folded, give it a low spill weight.
1441      if (NewVReg && TrySplit && AllCanFold) {
1442        LiveInterval &nI = getOrCreateInterval(NewVReg);
1443        nI.weight /= 10.0F;
1444      }
1445      AllCanFold = true;
1446    }
1447    NewVReg = ThisVReg;
1448
1449    bool HasDef = false;
1450    bool HasUse = false;
1451    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1452                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1453                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1454                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1455                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1456    if (!HasDef && !HasUse)
1457      continue;
1458
1459    AllCanFold &= CanFold;
1460
1461    // Update weight of spill interval.
1462    LiveInterval &nI = getOrCreateInterval(NewVReg);
1463    if (!TrySplit) {
1464      // The spill weight is now infinity as it cannot be spilled again.
1465      nI.markNotSpillable();
1466      continue;
1467    }
1468
1469    // Keep track of the last def and first use in each MBB.
1470    if (HasDef) {
1471      if (MI != ReMatOrigDefMI || !CanDelete) {
1472        bool HasKill = false;
1473        if (!HasUse)
1474          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1475        else {
1476          // If this is a two-address code, then this index starts a new VNInfo.
1477          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1478          if (VNI)
1479            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1480        }
1481        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1482          SpillIdxes.find(MBBId);
1483        if (!HasKill) {
1484          if (SII == SpillIdxes.end()) {
1485            std::vector<SRInfo> S;
1486            S.push_back(SRInfo(index, NewVReg, true));
1487            SpillIdxes.insert(std::make_pair(MBBId, S));
1488          } else if (SII->second.back().vreg != NewVReg) {
1489            SII->second.push_back(SRInfo(index, NewVReg, true));
1490          } else if (index > SII->second.back().index) {
1491            // If there is an earlier def and this is a two-address
1492            // instruction, then it's not possible to fold the store (which
1493            // would also fold the load).
1494            SRInfo &Info = SII->second.back();
1495            Info.index = index;
1496            Info.canFold = !HasUse;
1497          }
1498          SpillMBBs.set(MBBId);
1499        } else if (SII != SpillIdxes.end() &&
1500                   SII->second.back().vreg == NewVReg &&
1501                   index > SII->second.back().index) {
1502          // There is an earlier def that's not killed (must be two-address).
1503          // The spill is no longer needed.
1504          SII->second.pop_back();
1505          if (SII->second.empty()) {
1506            SpillIdxes.erase(MBBId);
1507            SpillMBBs.reset(MBBId);
1508          }
1509        }
1510      }
1511    }
1512
1513    if (HasUse) {
1514      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1515        SpillIdxes.find(MBBId);
1516      if (SII != SpillIdxes.end() &&
1517          SII->second.back().vreg == NewVReg &&
1518          index > SII->second.back().index)
1519        // Use(s) following the last def, it's not safe to fold the spill.
1520        SII->second.back().canFold = false;
1521      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1522        RestoreIdxes.find(MBBId);
1523      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1524        // If we are splitting live intervals, only fold if it's the first
1525        // use and there isn't another use later in the MBB.
1526        RII->second.back().canFold = false;
1527      else if (IsNew) {
1528        // Only need a reload if there isn't an earlier def / use.
1529        if (RII == RestoreIdxes.end()) {
1530          std::vector<SRInfo> Infos;
1531          Infos.push_back(SRInfo(index, NewVReg, true));
1532          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1533        } else {
1534          RII->second.push_back(SRInfo(index, NewVReg, true));
1535        }
1536        RestoreMBBs.set(MBBId);
1537      }
1538    }
1539
1540    // Update spill weight.
1541    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1542    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1543  }
1544
1545  if (NewVReg && TrySplit && AllCanFold) {
1546    // If all of its def / use can be folded, give it a low spill weight.
1547    LiveInterval &nI = getOrCreateInterval(NewVReg);
1548    nI.weight /= 10.0F;
1549  }
1550}
1551
1552bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1553                        unsigned vr, BitVector &RestoreMBBs,
1554                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1555  if (!RestoreMBBs[Id])
1556    return false;
1557  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1558  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1559    if (Restores[i].index == index &&
1560        Restores[i].vreg == vr &&
1561        Restores[i].canFold)
1562      return true;
1563  return false;
1564}
1565
1566void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1567                        unsigned vr, BitVector &RestoreMBBs,
1568                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1569  if (!RestoreMBBs[Id])
1570    return;
1571  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1572  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1573    if (Restores[i].index == index && Restores[i].vreg)
1574      Restores[i].index = SlotIndex();
1575}
1576
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk every operand still referring to li.reg. By this point all real
  // defs should have been rewritten (see the assert below); what remains are
  // IMPLICIT_DEFs, their uses, and debug values.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance before mutating: rewriting the operand or erasing MI below
    // would invalidate the current iterator position.
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      // Remaining defs must be the IMPLICIT_DEFs themselves; delete them and
      // drop them from the slot-index and spiller maps.
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that referenced li.reg to the new vreg,
      // marking it undef since it reads an implicitly defined value.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1618
1619float
1620LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1621  // Limit the loop depth ridiculousness.
1622  if (loopDepth > 200)
1623    loopDepth = 200;
1624
1625  // The loop depth is used to roughly estimate the number of times the
1626  // instruction is executed. Something like 10^d is simple, but will quickly
1627  // overflow a float. This expression behaves like 10^d for small d, but is
1628  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1629  // headroom before overflow.
1630  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1631
1632  return (isDef + isUse) * lc;
1633}
1634
1635void
1636LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1637  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1638    normalizeSpillWeight(*NewLIs[i]);
1639}
1640
/// addIntervalsForSpillsFast - Quick-and-dirty spilling: assign one stack
/// slot to the interval, then give every instruction touching li.reg its own
/// tiny new interval (or fold the memory operand directly). Returns the new
/// intervals created.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Process one instruction per iteration; each pass either folds the memory
  // operand into MI or rewrites MI's operands to a fresh vreg, so MI drops
  // out of li.reg's use list and the loop makes progress.
  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Collect all operand indices of MI that reference li.reg, noting
    // whether the instruction reads and/or writes the register.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    // Try to fold the stack slot directly into MI first; only create a new
    // interval when folding fails.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        // The new vreg lives only across this instruction, so uses are kills.
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval: a load-to-use range if MI reads the
      // register, and a def-to-store range if it writes it.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    // Re-scan from the beginning: MI no longer references li.reg, and
    // folding may have deleted instructions, so the old iterator is stale.
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
1730
/// addIntervalsForSpills - Spill (or split) the given interval, creating new
/// live intervals for the rewritten defs and uses. SpillIs holds intervals
/// being spilled at the same time; it is threaded through to the
/// rematerialization legality check. Returns the non-empty new intervals.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // First pass over the value numbers: decide which defs can be
  // rematerialized (and possibly deleted) and whether a stack slot is needed.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk every block that needs a spill and either fold the store into the
    // defining instruction or ask the spiller to insert an explicit store.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Walk every block that needs a restore and either fold the load into the
  // using instruction or ask the spiller to issue a reload / remat.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // Entries invalidated by eraseRestoreInfo carry a null index.
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // Mark the last use of this interval as a kill, unless it is tied to
        // a def operand of the same instruction.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
2051
2052/// hasAllocatableSuperReg - Return true if the specified physical register has
2053/// any super register that's allocatable.
2054bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2055  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2056    if (allocatableRegs_[*AS] && hasInterval(*AS))
2057      return true;
2058  return false;
2059}
2060
2061/// getRepresentativeReg - Find the largest super register of the specified
2062/// physical register.
2063unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2064  // Find the largest super-register that is allocatable.
2065  unsigned BestReg = Reg;
2066  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2067    unsigned SuperReg = *AS;
2068    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2069      BestReg = SuperReg;
2070      break;
2071    }
2072  }
2073  return BestReg;
2074}
2075
2076/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2077/// specified interval that conflicts with the specified physical register.
2078unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2079                                                   unsigned PhysReg) const {
2080  unsigned NumConflicts = 0;
2081  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2082  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2083         E = mri_->reg_end(); I != E; ++I) {
2084    MachineOperand &O = I.getOperand();
2085    MachineInstr *MI = O.getParent();
2086    if (MI->isDebugValue())
2087      continue;
2088    SlotIndex Index = getInstructionIndex(MI);
2089    if (pli.liveAt(Index))
2090      ++NumConflicts;
2091  }
2092  return NumConflicts;
2093}
2094
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // Collect the physical registers whose intervals must be cut: SpillReg
  // itself if it has one, otherwise its topmost sub-registers that do.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        // Mark AS's own sub-registers as seen so they are not added again.
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // Visit each instruction that touches li.reg once (an instruction may
  // appear multiple times in the use list, once per operand).
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      // Ask the spiller to save/restore PReg around this instruction, then
      // carve the instruction's slot out of PReg's interval.
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The slot straddles live-range boundaries; we cannot safely cut it,
        // which means allocation has failed. Inline asm with impossible
        // constraints is the common culprit, so call it out.
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      // Also cut the intervals of PReg's sub-registers over the same slot.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
2166
2167LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2168                                                  MachineInstr* startInst) {
2169  LiveInterval& Interval = getOrCreateInterval(reg);
2170  VNInfo* VN = Interval.getNextValue(
2171    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2172    startInst, true, getVNInfoAllocator());
2173  VN->setHasPHIKill(true);
2174  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2175  LiveRange LR(
2176     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2177     getMBBEndIdx(startInst->getParent()), VN);
2178  Interval.addRange(LR);
2179
2180  return LR;
2181}
2182
2183