LiveIntervalAnalysis.cpp revision 48fe63526e35ddaee7e98879596a569911f41319
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
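
// A quick reference for the index arithmetic used throughout this file (a
// hedged sketch; the InstrSlots enum lives in LiveIntervalAnalysis.h and is
// assumed here to be LOAD=0, USE=1, DEF=2, STORE=3, NUM=4):
//
//   unsigned MIIdx = getInstructionIndex(MI); // a multiple of InstrSlots::NUM
//   unsigned Base  = getBaseIndex(MIIdx);     // MIIdx rounded down to NUM
//   unsigned Use   = getUseIndex(MIIdx);      // Base + USE
//   unsigned Def   = getDefIndex(MIIdx);      // Base + DEF
//
// So a value defined at instruction 8 and last used at instruction 16 gets
// the intra-block range [defSlot(8), useSlot(16)+1) == [10, 18).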

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      (void)inserted; // Silence unused-variable warnings when asserts are off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
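
// For instance (a hedged sketch of the remapping above, with
// InstrSlots::NUM == 4): a live range that started at old index 42 belonged
// to OldI2MI[42 / 4] at slot offset 42 % 4 == 2 (a def slot), so its new
// start is mi2iMap_[OldI2MI[10]] + 2. Only an offset equal to
// InstrSlots::LOAD (a block-boundary index) falls back to the MBB start/end
// maps instead.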

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
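
// A hedged sketch of the scan pattern used above and below: ranges store
// refined slot indexes, so whole instructions are visited by rounding to
// base indexes and striding by InstrSlots::NUM:
//
//   for (unsigned index = getBaseIndex(I->start),
//          end = getBaseIndex(I->end - 1) + InstrSlots::NUM;
//        index != end; index += InstrSlots::NUM)
//     if (MachineInstr *MI = getInstructionFromIndex(index))
//       ... visit MI ...
//
// Rounding I->end - 1 rather than I->end keeps a range that ends exactly on
// a base index from visiting one instruction too many.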

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can also check uses.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // It cannot be an early clobber MO.
      assert(!MO.isEarlyClobber() && "Unexpected early clobber!");

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // It cannot be an early clobber MO.
      assert(!MO.isEarlyClobber() && "Unexpected early clobber!");

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
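
// A small worked example of the two-address split above (a hedged sketch
// with made-up slot indexes, assuming InstrSlots::NUM == 4):
//   4   %reg1024 = MOV %reg1025        ; copy, defines val#0 at slot 6
//   12  %reg1024 = ADD %reg1024, 4     ; two-address redef at slot 14
// The range [6, 14) originally built for val#0 is handed to a new val#1
// (still marked as defined by the copy), while val#0 is re-pointed at the
// ADD's def slot 14; the single interval then correctly carries two values.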

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = start + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case in which we should reach here with a dead physreg and no
  // killing instruction is when the register is live-in to the function and
  // never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
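
// For instance (a hedged sketch, assuming InstrSlots::NUM == 4): a physical
// register defined at instruction index 20 and killed at index 28 gets the
// interval [defSlot(20), useSlot(28)+1) == [22, 30); if it is dead on
// definition, it gets the one-slot interval [22, 23) instead.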

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

exit:
  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
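
// For example (a hedged sketch, assuming InstrSlots::NUM == 4): a register
// live-in to a block and first killed at instruction index 12 gets the range
// [blockStart, useSlot(12)+1) == [blockStart, 14); one that is never read or
// redefined in the block ("live through") runs to the last scanned base
// index instead. Either way the value number is created with the ~0U
// sentinel def, since no instruction in the function defines it.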

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
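
// The numbering invariants assumed above (a sketch, with InstrSlots::NUM ==
// 4): computeNumbering gives every instruction NUM indexes plus NUM *
// max(1, numdefs) trailing empty indexes reserved for spill code. So an
// instruction at index 8 with one def reserves 12..15 and the next
// instruction sits at 16; the "skip over empty indices" loops advance
// MIIndex across any gap where getInstructionFromIndex() returns null.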

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // In release builds the first match suffices; with asserts enabled, keep
    // scanning so the assert above can catch a second register operand.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0U)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
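
// A hedged usage sketch: callers seed Ops with the operand indexes that
// reference the spilled register and let FilterFoldedOps classify the access
// while dropping tied uses:
//
//   unsigned MRInfo = 0;
//   SmallVector<unsigned, 2> FoldOps;
//   if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
//     return false;                      // some operand prevents folding
//   bool Reads  = (MRInfo & VirtRegMap::isRef) != 0;
//   bool Writes = (MRInfo & VirtRegMap::isMod) != 0;
//
// A tied (two-address) use marks the access isModRef even though it is left
// out of FoldOps, because the tied def still writes the register.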

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot, or any rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and this returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// the interval on to-be re-materialized operands of MI) with the new
/// register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
1201
1202/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1203/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1204bool LiveIntervals::
1205rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1206                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1207                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1208                 unsigned Slot, int LdSlot,
1209                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1210                 VirtRegMap &vrm,
1211                 const TargetRegisterClass* rc,
1212                 SmallVector<int, 4> &ReMatIds,
1213                 const MachineLoopInfo *loopInfo,
1214                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1215                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1216                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1217  MachineBasicBlock *MBB = MI->getParent();
1218  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1219  bool CanFold = false;
1220 RestartInstruction:
1221  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1222    MachineOperand& mop = MI->getOperand(i);
1223    if (!mop.isReg())
1224      continue;
1225    unsigned Reg = mop.getReg();
1226    unsigned RegI = Reg;
1227    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1228      continue;
1229    if (Reg != li.reg)
1230      continue;
1231
1232    bool TryFold = !DefIsReMat;
1233    bool FoldSS = true; // Default behavior unless it's a remat.
1234    int FoldSlot = Slot;
1235    if (DefIsReMat) {
1236      // If this is the rematerializable definition MI itself and
1237      // all of its uses are rematerialized, simply delete it.
1238      if (MI == ReMatOrigDefMI && CanDelete) {
1239        DOUT << "\t\t\t\tErasing re-materlizable def: ";
1240        DOUT << MI << '\n';
1241        RemoveMachineInstrFromMaps(MI);
1242        vrm.RemoveMachineInstrFromMaps(MI);
1243        MI->eraseFromParent();
1244        break;
1245      }
1246
1247      // If def for this use can't be rematerialized, then try folding.
1248      // If def is rematerializable and it's a load, also try folding.
1249      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1250      if (isLoad) {
1251        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1252        FoldSS = isLoadSS;
1253        FoldSlot = LdSlot;
1254      }
1255    }
1256
1257    // Scan all of the operands of this instruction rewriting operands
1258    // to use NewVReg instead of li.reg as appropriate.  We do this for
1259    // two reasons:
1260    //
1261    //   1. If the instr reads the same spilled vreg multiple times, we
1262    //      want to reuse the NewVReg.
1263    //   2. If the instr is a two-addr instruction, we are required to
1264    //      keep the src/dst regs pinned.
1265    //
1266    // Keep track of whether we replace a use and/or def so that we can
1267    // create the spill interval with the appropriate range.
1268
1269    HasUse = mop.isUse();
1270    HasDef = mop.isDef();
1271    SmallVector<unsigned, 2> Ops;
1272    Ops.push_back(i);
1273    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1274      const MachineOperand &MOj = MI->getOperand(j);
1275      if (!MOj.isReg())
1276        continue;
1277      unsigned RegJ = MOj.getReg();
1278      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1279        continue;
1280      if (RegJ == RegI) {
1281        Ops.push_back(j);
1282        HasUse |= MOj.isUse();
1283        HasDef |= MOj.isDef();
1284      }
1285    }
1286
1287    if (HasUse && !li.liveAt(getUseIndex(index)))
1288      // Must be defined by an implicit def. It should not be spilled. Note,
1289      // this is for correctness reason. e.g.
1290      // 8   %reg1024<def> = IMPLICIT_DEF
1291      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1292      // The live range [12, 14) are not part of the r1024 live interval since
1293      // it's defined by an implicit def. It will not conflicts with live
1294      // interval of r1025. Now suppose both registers are spilled, you can
1295      // easily see a situation where both registers are reloaded before
1296      // the INSERT_SUBREG and both target registers that would overlap.
1297      HasUse = false;
1298
1299    // Update stack slot spill weight if we are splitting.
1300    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1301      if (!TrySplit)
1302      SSWeight += Weight;
1303
1304    // Create a new virtual register for the spill interval.
1305    // Create the new register now so we can map the fold instruction
1306    // to the new register so when it is unfolded we get the correct
1307    // answer.
1308    bool CreatedNewVReg = false;
1309    if (NewVReg == 0) {
1310      NewVReg = mri_->createVirtualRegister(rc);
1311      vrm.grow();
1312      CreatedNewVReg = true;
1313    }
1314
1315    if (!TryFold)
1316      CanFold = false;
1317    else {
1318      // Do not fold load / store here if we are splitting. We'll find an
1319      // optimal point to insert a load / store later.
1320      if (!TrySplit) {
1321        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1322                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1323          // Folding the load/store can completely change the instruction in
1324          // unpredictable ways, rescan it from the beginning.
1325
1326          if (FoldSS) {
1327            // We need to give the new vreg the same stack slot as the
1328            // spilled interval.
1329            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1330          }
1331
1332          HasUse = false;
1333          HasDef = false;
1334          CanFold = false;
1335          if (isRemoved(MI)) {
1336            SSWeight -= Weight;
1337            break;
1338          }
1339          goto RestartInstruction;
1340        }
1341      } else {
1342        // We'll try to fold it later if it's profitable.
1343        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1344      }
1345    }
1346
1347    mop.setReg(NewVReg);
1348    if (mop.isImplicit())
1349      rewriteImplicitOps(li, MI, NewVReg, vrm);
1350
1351    // Reuse NewVReg for other reads.
1352    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1353      MachineOperand &mopj = MI->getOperand(Ops[j]);
1354      mopj.setReg(NewVReg);
1355      if (mopj.isImplicit())
1356        rewriteImplicitOps(li, MI, NewVReg, vrm);
1357    }
1358
1359    if (CreatedNewVReg) {
1360      if (DefIsReMat) {
1361        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1362        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1363          // Each valnum may have its own remat id.
1364          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1365        } else {
1366          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1367        }
1368        if (!CanDelete || (HasUse && HasDef)) {
1369          // If this is a two-addr instruction then its use operands are
1370          // rematerializable but its def is not. It should be assigned a
1371          // stack slot.
1372          vrm.assignVirt2StackSlot(NewVReg, Slot);
1373        }
1374      } else {
1375        vrm.assignVirt2StackSlot(NewVReg, Slot);
1376      }
1377    } else if (HasUse && HasDef &&
1378               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1379      // If this interval hasn't been assigned a stack slot (because earlier
1380      // def is a deleted remat def), do it now.
1381      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1382      vrm.assignVirt2StackSlot(NewVReg, Slot);
1383    }
1384
1385    // Re-matting an instruction with virtual register use. Add the
1386    // register as an implicit use on the use MI.
1387    if (DefIsReMat && ImpUse)
1388      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1389
1390    // Create a new register interval for this spill / remat.
1391    LiveInterval &nI = getOrCreateInterval(NewVReg);
1392    if (CreatedNewVReg) {
1393      NewLIs.push_back(&nI);
1394      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1395      if (TrySplit)
1396        vrm.setIsSplitFromReg(NewVReg, li.reg);
1397    }
1398
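    // Each instruction index is subdivided into load / use / def / store
    // slots, so a reload covers [load, use+1) and a new def [def, store).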
1399    if (HasUse) {
1400      if (CreatedNewVReg) {
1401        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1402                     nI.getNextValue(~0U, 0, VNInfoAllocator));
1403        DOUT << " +" << LR;
1404        nI.addRange(LR);
1405      } else {
1406        // Extend the split live interval to this def / use.
1407        unsigned End = getUseIndex(index)+1;
1408        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1409                     nI.getValNumInfo(nI.getNumValNums()-1));
1410        DOUT << " +" << LR;
1411        nI.addRange(LR);
1412      }
1413    }
1414    if (HasDef) {
1415      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1416                   nI.getNextValue(~0U, 0, VNInfoAllocator));
1417      DOUT << " +" << LR;
1418      nI.addRange(LR);
1419    }
1420
1421    DOUT << "\t\t\t\tAdded new interval: ";
1422    nI.print(DOUT, tri_);
1423    DOUT << '\n';
1424  }
1425  return CanFold;
1426}
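
/// anyKillInMBBAfterIdx - Return true if one of the given val#'s kill points
/// lies within MBB and after the given index.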
1427bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1428                                   const VNInfo *VNI,
1429                                   MachineBasicBlock *MBB, unsigned Idx) const {
1430  unsigned End = getMBBEndIdx(MBB);
1431  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1432    unsigned KillIdx = VNI->kills[j];
1433    if (KillIdx > Idx && KillIdx < End)
1434      return true;
1435  }
1436  return false;
1437}
1438
1439/// RewriteInfo - Keep track of machine instrs that will be rewritten
1440/// during spilling.
1441namespace {
1442  struct RewriteInfo {
1443    unsigned Index;
1444    MachineInstr *MI;
1445    bool HasUse;
1446    bool HasDef;
1447    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1448      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1449  };
1450
1451  struct RewriteInfoCompare {
1452    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1453      return LHS.Index < RHS.Index;
1454    }
1455  };
1456}
1457
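/// rewriteInstructionsForSpills - Rewrite all instructions defining or using
/// the live range, assigning new virtual registers and recording per-MBB
/// spill / restore candidates along the way.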
1458void LiveIntervals::
1459rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1460                    LiveInterval::Ranges::const_iterator &I,
1461                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1462                    unsigned Slot, int LdSlot,
1463                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1464                    VirtRegMap &vrm,
1465                    const TargetRegisterClass* rc,
1466                    SmallVector<int, 4> &ReMatIds,
1467                    const MachineLoopInfo *loopInfo,
1468                    BitVector &SpillMBBs,
1469                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1470                    BitVector &RestoreMBBs,
1471                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1472                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1473                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1474  bool AllCanFold = true;
1475  unsigned NewVReg = 0;
1476  unsigned start = getBaseIndex(I->start);
1477  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
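  // Round start / end out to whole instruction indices so that every operand
  // of an instruction inside the range is rewritten.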
1478
1479  // First collect all the defs / uses in this live range that will be rewritten.
1480  // Make sure they are sorted according to instruction index.
1481  std::vector<RewriteInfo> RewriteMIs;
1482  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1483         re = mri_->reg_end(); ri != re; ) {
1484    MachineInstr *MI = &*ri;
1485    MachineOperand &O = ri.getOperand();
1486    ++ri;
1487    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1488    unsigned index = getInstructionIndex(MI);
1489    if (index < start || index >= end)
1490      continue;
1491    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1492      // Must be defined by an implicit def. It should not be spilled. Note,
1493      // this is for correctness reasons. e.g.
1494      // 8   %reg1024<def> = IMPLICIT_DEF
1495      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1496      // The live range [12, 14) is not part of the r1024 live interval since
1497      // it's defined by an implicit def. It will not conflict with the live
1498      // interval of r1025. Now suppose both registers are spilled; it is easy
1499      // to end up with both registers reloaded before the INSERT_SUBREG,
1500      // where the two target registers would overlap.
1501      continue;
1502    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1503  }
1504  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1505
1506  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
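  // If the rematerializable def itself reads a virtual register, remember it;
  // that register must stay live and is re-added as an implicit use below.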
1507  // Now rewrite the defs and uses.
1508  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1509    RewriteInfo &rwi = RewriteMIs[i];
1510    ++i;
1511    unsigned index = rwi.Index;
1512    bool MIHasUse = rwi.HasUse;
1513    bool MIHasDef = rwi.HasDef;
1514    MachineInstr *MI = rwi.MI;
1515    // If MI defs and/or uses the same register multiple times, then there
1516    // are multiple entries.
1517    unsigned NumUses = MIHasUse;
1518    while (i != e && RewriteMIs[i].MI == MI) {
1519      assert(RewriteMIs[i].Index == index);
1520      bool isUse = RewriteMIs[i].HasUse;
1521      if (isUse) ++NumUses;
1522      MIHasUse |= isUse;
1523      MIHasDef |= RewriteMIs[i].HasDef;
1524      ++i;
1525    }
1526    MachineBasicBlock *MBB = MI->getParent();
1527
1528    if (ImpUse && MI != ReMatDefMI) {
1529      // Re-matting an instruction that uses a virtual register. Update the
1530      // register interval's spill weight to HUGE_VALF to prevent it from
1531      // being spilled.
1532      LiveInterval &ImpLi = getInterval(ImpUse);
1533      ImpLi.weight = HUGE_VALF;
1534    }
1535
1536    unsigned MBBId = MBB->getNumber();
1537    unsigned ThisVReg = 0;
1538    if (TrySplit) {
1539      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1540      if (NVI != MBBVRegsMap.end()) {
1541        ThisVReg = NVI->second;
1542        // One common case:
1543        // x = use
1544        // ...
1545        // ...
1546        // def = ...
1547        //     = use
1548        // It's better to start a new interval here to avoid artificially
1549        // extending the new interval.
1550        if (MIHasDef && !MIHasUse) {
1551          MBBVRegsMap.erase(MBB->getNumber());
1552          ThisVReg = 0;
1553        }
1554      }
1555    }
1556
1557    bool IsNew = ThisVReg == 0;
1558    if (IsNew) {
1559      // This ends the previous live interval. If all of its defs / uses
1560      // can be folded, give it a low spill weight.
1561      if (NewVReg && TrySplit && AllCanFold) {
1562        LiveInterval &nI = getOrCreateInterval(NewVReg);
1563        nI.weight /= 10.0F;
1564      }
1565      AllCanFold = true;
1566    }
1567    NewVReg = ThisVReg;
1568
1569    bool HasDef = false;
1570    bool HasUse = false;
1571    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1572                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1573                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1574                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1575                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1576    if (!HasDef && !HasUse)
1577      continue;
1578
1579    AllCanFold &= CanFold;
1580
1581    // Update weight of spill interval.
1582    LiveInterval &nI = getOrCreateInterval(NewVReg);
1583    if (!TrySplit) {
1584      // The spill weight is now infinity as it cannot be spilled again.
1585      nI.weight = HUGE_VALF;
1586      continue;
1587    }
1588
1589    // Keep track of the last def and first use in each MBB.
1590    if (HasDef) {
1591      if (MI != ReMatOrigDefMI || !CanDelete) {
1592        bool HasKill = false;
1593        if (!HasUse)
1594          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1595        else {
1596          // If this is two-address code, then this index starts a new VNInfo.
1597          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1598          if (VNI)
1599            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1600        }
1601        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1602          SpillIdxes.find(MBBId);
1603        if (!HasKill) {
1604          if (SII == SpillIdxes.end()) {
1605            std::vector<SRInfo> S;
1606            S.push_back(SRInfo(index, NewVReg, true));
1607            SpillIdxes.insert(std::make_pair(MBBId, S));
1608          } else if (SII->second.back().vreg != NewVReg) {
1609            SII->second.push_back(SRInfo(index, NewVReg, true));
1610          } else if ((int)index > SII->second.back().index) {
1611            // If there is an earlier def and this is a two-address
1612            // instruction, then it's not possible to fold the store (which
1613            // would also fold the load).
1614            SRInfo &Info = SII->second.back();
1615            Info.index = index;
1616            Info.canFold = !HasUse;
1617          }
1618          SpillMBBs.set(MBBId);
1619        } else if (SII != SpillIdxes.end() &&
1620                   SII->second.back().vreg == NewVReg &&
1621                   (int)index > SII->second.back().index) {
1622          // There is an earlier def that's not killed (must be two-address).
1623          // The spill is no longer needed.
1624          SII->second.pop_back();
1625          if (SII->second.empty()) {
1626            SpillIdxes.erase(MBBId);
1627            SpillMBBs.reset(MBBId);
1628          }
1629        }
1630      }
1631    }
1632
1633    if (HasUse) {
1634      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1635        SpillIdxes.find(MBBId);
1636      if (SII != SpillIdxes.end() &&
1637          SII->second.back().vreg == NewVReg &&
1638          (int)index > SII->second.back().index)
1639        // Use(s) follow the last def; it's not safe to fold the spill.
1640        SII->second.back().canFold = false;
1641      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1642        RestoreIdxes.find(MBBId);
1643      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1644        // If we are splitting live intervals, only fold if it's the first
1645        // use and there isn't another use later in the MBB.
1646        RII->second.back().canFold = false;
1647      else if (IsNew) {
1648        // Only need a reload if there isn't an earlier def / use.
1649        if (RII == RestoreIdxes.end()) {
1650          std::vector<SRInfo> Infos;
1651          Infos.push_back(SRInfo(index, NewVReg, true));
1652          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1653        } else {
1654          RII->second.push_back(SRInfo(index, NewVReg, true));
1655        }
1656        RestoreMBBs.set(MBBId);
1657      }
1658    }
1659
1660    // Update spill weight.
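    // (getSpillWeight grows with loop depth, roughly a factor of 10 per loop
    // level, so defs / uses inside deep loops dominate the weight.)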
1661    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1662    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1663  }
1664
1665  if (NewVReg && TrySplit && AllCanFold) {
1666    // If all of its def / use can be folded, give it a low spill weight.
1667    LiveInterval &nI = getOrCreateInterval(NewVReg);
1668    nI.weight /= 10.0F;
1669  }
1670}
1671
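/// alsoFoldARestore - Return true if a still-foldable restore of the given
/// vreg is recorded at the given index in block Id.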
1672bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1673                        BitVector &RestoreMBBs,
1674                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1675  if (!RestoreMBBs[Id])
1676    return false;
1677  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1678  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1679    if (Restores[i].index == index &&
1680        Restores[i].vreg == vr &&
1681        Restores[i].canFold)
1682      return true;
1683  return false;
1684}
1685
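/// eraseRestoreInfo - Invalidate any recorded restore of the given vreg at
/// the given index in block Id so that no reload is issued for it.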
1686void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1687                        BitVector &RestoreMBBs,
1688                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1689  if (!RestoreMBBs[Id])
1690    return;
1691  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1692  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1693    if (Restores[i].index == index && Restores[i].vreg == vr)
1694      Restores[i].index = -1;
1695}
1696
1697/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1698/// spilled and create empty intervals for their uses.
1699void
1700LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1701                                    const TargetRegisterClass* rc,
1702                                    std::vector<LiveInterval*> &NewLIs) {
1703  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1704         re = mri_->reg_end(); ri != re; ) {
1705    MachineOperand &O = ri.getOperand();
1706    MachineInstr *MI = &*ri;
1707    ++ri;
1708    if (O.isDef()) {
1709      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1710             "Register def was not rewritten?");
1711      RemoveMachineInstrFromMaps(MI);
1712      vrm.RemoveMachineInstrFromMaps(MI);
1713      MI->eraseFromParent();
1714    } else {
1715      // This must be a use of an implicit_def so it's not part of the live
1716      // interval. Create a new empty live interval for it.
1717      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1718      unsigned NewVReg = mri_->createVirtualRegister(rc);
1719      vrm.grow();
1720      vrm.setIsImplicitlyDefined(NewVReg);
1721      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1722      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1723        MachineOperand &MO = MI->getOperand(i);
1724        if (MO.isReg() && MO.getReg() == li.reg)
1725          MO.setReg(NewVReg);
1726      }
1727    }
1728  }
1729}
1730
1731namespace {
1732  struct LISorter {
1733    bool operator()(LiveInterval* A, LiveInterval* B) {
1734      return A->beginNumber() < B->beginNumber();
1735    }
1736  };
1737}
1738
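/// addIntervalsForSpillsFast - Spill the interval without splitting or
/// rematerialization: fold each def / use into its instruction if possible,
/// otherwise rewrite it to a new vreg that is live only across that
/// instruction and let the spiller insert the load / store.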
1739std::vector<LiveInterval*> LiveIntervals::
1740addIntervalsForSpillsFast(const LiveInterval &li,
1741                          const MachineLoopInfo *loopInfo,
1742                          VirtRegMap &vrm, float& SSWeight) {
1743  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1744
1745  std::vector<LiveInterval*> added;
1746
1747  assert(li.weight != HUGE_VALF &&
1748         "attempt to spill already spilled interval!");
1749
1750  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1751  DEBUG(li.dump());
1752  DOUT << '\n';
1753
1754  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1755
1756  SSWeight = 0.0f;
1757
1758  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1759  while (RI != mri_->reg_end()) {
1760    MachineInstr* MI = &*RI;
1761
1762    SmallVector<unsigned, 2> Indices;
1763    bool HasUse = false;
1764    bool HasDef = false;
1765
1766    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1767      MachineOperand& mop = MI->getOperand(i);
1768      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1769
1770      HasUse |= MI->getOperand(i).isUse();
1771      HasDef |= MI->getOperand(i).isDef();
1772
1773      Indices.push_back(i);
1774    }
1775
1776    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1777                              Indices, true, slot, li.reg)) {
1778      unsigned NewVReg = mri_->createVirtualRegister(rc);
1779      vrm.grow();
1780      vrm.assignVirt2StackSlot(NewVReg, slot);
1781
1782      // Create a new live interval for this spill.
1783      LiveInterval &nI = getOrCreateInterval(NewVReg);
1784
1785      // The spill weight is now infinity as it
1786      // cannot be spilled again.
1787      nI.weight = HUGE_VALF;
1788
1789      // Rewrite register operands to use the new vreg.
1790      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1791           E = Indices.end(); I != E; ++I) {
1792        MI->getOperand(*I).setReg(NewVReg);
1793
1794        if (MI->getOperand(*I).isUse())
1795          MI->getOperand(*I).setIsKill(true);
1796      }
1797
1798      // Fill in the new live interval.
1799      unsigned index = getInstructionIndex(MI);
1800      if (HasUse) {
1801        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1802                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1803        DOUT << " +" << LR;
1804        nI.addRange(LR);
1805        vrm.addRestorePoint(NewVReg, MI);
1806      }
1807      if (HasDef) {
1808        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1809                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1810        DOUT << " +" << LR;
1811        nI.addRange(LR);
1812        vrm.addSpillPoint(NewVReg, true, MI);
1813      }
1814
1815      added.push_back(&nI);
1816
1817      DOUT << "\t\t\t\tadded new interval: ";
1818      DEBUG(nI.dump());
1819      DOUT << '\n';
1820
1821      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1822      if (HasUse) {
1823        if (HasDef)
1824          SSWeight += getSpillWeight(true, true, loopDepth);
1825        else
1826          SSWeight += getSpillWeight(false, true, loopDepth);
1827      } else
1828        SSWeight += getSpillWeight(true, false, loopDepth);
1829    }
1830
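    // The rewrite (or fold) above removed MI from li.reg's use list, so
    // restart at reg_begin() until no users of the original register remain.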
1832    RI = mri_->reg_begin(li.reg);
1833  }
1834
1835  // Clients expect the new intervals to be returned in sorted order.
1836  std::sort(added.begin(), added.end(), LISorter());
1837
1838  return added;
1839}
1840
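/// addIntervalsForSpills - Spill the given interval, rematerializing defs
/// where possible and, when enabled, splitting it at basic block boundaries.
/// Returns the new live intervals that were created.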
1841std::vector<LiveInterval*> LiveIntervals::
1842addIntervalsForSpills(const LiveInterval &li,
1843                      SmallVectorImpl<LiveInterval*> &SpillIs,
1844                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1845                      float &SSWeight) {
1846
1847  if (EnableFastSpilling)
1848    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1849
1850  assert(li.weight != HUGE_VALF &&
1851         "attempt to spill already spilled interval!");
1852
1853  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1854  li.print(DOUT, tri_);
1855  DOUT << '\n';
1856
1857  // Spill slot weight.
1858  SSWeight = 0.0f;
1859
1860  // Each bit specifies whether a spill is required in the MBB.
1861  BitVector SpillMBBs(mf_->getNumBlockIDs());
1862  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1863  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1864  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1865  DenseMap<unsigned,unsigned> MBBVRegsMap;
1866  std::vector<LiveInterval*> NewLIs;
1867  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1868
1869  unsigned NumValNums = li.getNumValNums();
1870  SmallVector<MachineInstr*, 4> ReMatDefs;
1871  ReMatDefs.resize(NumValNums, NULL);
1872  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1873  ReMatOrigDefs.resize(NumValNums, NULL);
1874  SmallVector<int, 4> ReMatIds;
1875  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1876  BitVector ReMatDelete(NumValNums);
1877  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1878
1879  // Spilling a split live interval. It cannot be split any further. It is
1880  // also guaranteed to be a single val# / range interval.
1881  if (vrm.getPreSplitReg(li.reg)) {
1882    vrm.setIsSplitFromReg(li.reg, 0);
1883    // Unset the split kill marker on the last use.
1884    unsigned KillIdx = vrm.getKillPoint(li.reg);
1885    if (KillIdx) {
1886      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1887      assert(KillMI && "Last use disappeared?");
1888      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1889      assert(KillOp != -1 && "Last use disappeared?");
1890      KillMI->getOperand(KillOp).setIsKill(false);
1891    }
1892    vrm.removeKillPoint(li.reg);
1893    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1894    Slot = vrm.getStackSlot(li.reg);
1895    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1896    MachineInstr *ReMatDefMI = DefIsReMat ?
1897      vrm.getReMaterializedMI(li.reg) : NULL;
1898    int LdSlot = 0;
1899    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1900    bool isLoad = isLoadSS ||
1901      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1902    bool IsFirstRange = true;
1903    for (LiveInterval::Ranges::const_iterator
1904           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1905      // If this is a split live interval with multiple ranges, it means there
1906      // are two-address instructions that re-defined the value. Only the
1907      // first def can be rematerialized!
1908      if (IsFirstRange) {
1909        // Note ReMatOrigDefMI has already been deleted.
1910        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1911                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1912                             false, vrm, rc, ReMatIds, loopInfo,
1913                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1914                             MBBVRegsMap, NewLIs, SSWeight);
1915      } else {
1916        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1917                             Slot, 0, false, false, false,
1918                             false, vrm, rc, ReMatIds, loopInfo,
1919                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1920                             MBBVRegsMap, NewLIs, SSWeight);
1921      }
1922      IsFirstRange = false;
1923    }
1924
1925    SSWeight = 0.0f;  // Already accounted for when split.
1926    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1927    return NewLIs;
1928  }
1929
1930  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1931  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1932    TrySplit = false;
1933  if (TrySplit)
1934    ++numSplits;
1935  bool NeedStackSlot = false;
1936  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1937       i != e; ++i) {
1938    const VNInfo *VNI = *i;
1939    unsigned VN = VNI->id;
1940    unsigned DefIdx = VNI->def;
1941    if (DefIdx == ~1U)
1942      continue; // Dead val#.
1943    // Is the def for the val# rematerializable?
1944    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1945      ? 0 : getInstructionFromIndex(DefIdx);
1946    bool dummy;
1947    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1948      // Remember how to remat the def of this val#.
1949      ReMatOrigDefs[VN] = ReMatDefMI;
1950      // Original def may be modified so we have to make a copy here.
1951      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1952      ClonedMIs.push_back(Clone);
1953      ReMatDefs[VN] = Clone;
1954
1955      bool CanDelete = true;
1956      if (VNI->hasPHIKill) {
1957        // A kill is a phi node; not all of its uses can be rematerialized.
1958        // It must not be deleted.
1959        CanDelete = false;
1960        // Need a stack slot if there is any live range where uses cannot be
1961        // rematerialized.
1962        NeedStackSlot = true;
1963      }
1964      if (CanDelete)
1965        ReMatDelete.set(VN);
1966    } else {
1967      // Need a stack slot if there is any live range where uses cannot be
1968      // rematerialized.
1969      NeedStackSlot = true;
1970    }
1971  }
1972
1973  // One stack slot per live interval.
1974  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1975    Slot = vrm.assignVirt2StackSlot(li.reg);
1976
1977  // Create new intervals and rewrite defs and uses.
1978  for (LiveInterval::Ranges::const_iterator
1979         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1980    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1981    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1982    bool DefIsReMat = ReMatDefMI != NULL;
1983    bool CanDelete = ReMatDelete[I->valno->id];
1984    int LdSlot = 0;
1985    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1986    bool isLoad = isLoadSS ||
1987      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1988    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1989                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1990                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1991                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1992                               MBBVRegsMap, NewLIs, SSWeight);
1993  }
1994
1995  // Insert spills / restores if we are splitting.
1996  if (!TrySplit) {
1997    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1998    return NewLIs;
1999  }
2000
2001  SmallPtrSet<LiveInterval*, 4> AddedKill;
2002  SmallVector<unsigned, 2> Ops;
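  // First visit each block that needs a spill: fold the store into the last
  // def when possible, otherwise record a spill point for the spiller.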
2003  if (NeedStackSlot) {
2004    int Id = SpillMBBs.find_first();
2005    while (Id != -1) {
2006      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2007      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2008      std::vector<SRInfo> &spills = SpillIdxes[Id];
2009      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2010        int index = spills[i].index;
2011        unsigned VReg = spills[i].vreg;
2012        LiveInterval &nI = getOrCreateInterval(VReg);
2013        bool isReMat = vrm.isReMaterialized(VReg);
2014        MachineInstr *MI = getInstructionFromIndex(index);
2015        bool CanFold = false;
2016        bool FoundUse = false;
2017        Ops.clear();
2018        if (spills[i].canFold) {
2019          CanFold = true;
2020          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2021            MachineOperand &MO = MI->getOperand(j);
2022            if (!MO.isReg() || MO.getReg() != VReg)
2023              continue;
2024
2025            Ops.push_back(j);
2026            if (MO.isDef())
2027              continue;
2028            if (isReMat ||
2029                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2030                                                RestoreMBBs, RestoreIdxes))) {
2031              // MI has two-address uses of the same register. If the use
2032              // isn't the first and only use in the BB, then we can't fold
2033              // it. FIXME: Move this to rewriteInstructionsForSpills.
2034              CanFold = false;
2035              break;
2036            }
2037            FoundUse = true;
2038          }
2039        }
2040        // Fold the store into the def if possible.
2041        bool Folded = false;
2042        if (CanFold && !Ops.empty()) {
2043          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2044            Folded = true;
2045            if (FoundUse) {
2046              // Also folded uses, do not issue a load.
2047              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2048              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2049            }
2050            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2051          }
2052        }
2053
2054        // Otherwise tell the spiller to issue a spill.
2055        if (!Folded) {
2056          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2057          bool isKill = LR->end == getStoreIndex(index);
2058          if (!MI->registerDefIsDead(nI.reg))
2059            // No need to spill a dead def.
2060            vrm.addSpillPoint(VReg, isKill, MI);
2061          if (isKill)
2062            AddedKill.insert(&nI);
2063        }
2064
2065        // Update spill slot weight.
2066        if (!isReMat)
2067          SSWeight += getSpillWeight(true, false, loopDepth);
2068      }
2069      Id = SpillMBBs.find_next(Id);
2070    }
2071  }
2072
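  // Then visit each block that needs a restore: fold the load (or the
  // rematerialized def) into the first use when possible, otherwise record
  // a restore point.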
2073  int Id = RestoreMBBs.find_first();
2074  while (Id != -1) {
2075    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2076    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2077
2078    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2079    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2080      int index = restores[i].index;
2081      if (index == -1)
2082        continue;
2083      unsigned VReg = restores[i].vreg;
2084      LiveInterval &nI = getOrCreateInterval(VReg);
2085      bool isReMat = vrm.isReMaterialized(VReg);
2086      MachineInstr *MI = getInstructionFromIndex(index);
2087      bool CanFold = false;
2088      Ops.clear();
2089      if (restores[i].canFold) {
2090        CanFold = true;
2091        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2092          MachineOperand &MO = MI->getOperand(j);
2093          if (!MO.isReg() || MO.getReg() != VReg)
2094            continue;
2095
2096          if (MO.isDef()) {
2097            // If this restore were to be folded, it would have been folded
2098            // already.
2099            CanFold = false;
2100            break;
2101          }
2102          Ops.push_back(j);
2103        }
2104      }
2105
2106      // Fold the load into the use if possible.
2107      bool Folded = false;
2108      if (CanFold && !Ops.empty()) {
2109        if (!isReMat)
2110          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2111        else {
2112          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2113          int LdSlot = 0;
2114          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2115          // If the rematerializable def is a load, also try to fold it.
2116          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2117            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2118                                          Ops, isLoadSS, LdSlot, VReg);
2119          if (!Folded) {
2120            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2121            if (ImpUse) {
2122              // Re-matting an instruction that uses a virtual register.
2123              // Add the register as an implicit use on the use MI and update
2124              // the register interval's spill weight to HUGE_VALF to prevent
2125              // it from being spilled.
2126              LiveInterval &ImpLi = getInterval(ImpUse);
2127              ImpLi.weight = HUGE_VALF;
2128              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2129            }
2130          }
2131        }
2132      }
2133      // If folding is not possible / failed, then tell the spiller to issue a
2134      // load / rematerialization for us.
2135      if (Folded)
2136        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2137      else
2138        vrm.addRestorePoint(VReg, MI);
2139
2140      // Update spill slot weight.
2141      if (!isReMat)
2142        SSWeight += getSpillWeight(false, true, loopDepth);
2143    }
2144    Id = RestoreMBBs.find_next(Id);
2145  }
2146
2147  // Finalize intervals: add kills, compute final spill weights, and filter out
2148  // dead intervals.
2149  std::vector<LiveInterval*> RetNewLIs;
2150  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2151    LiveInterval *LI = NewLIs[i];
2152    if (!LI->empty()) {
2153      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2154      if (!AddedKill.count(LI)) {
2155        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2156        unsigned LastUseIdx = getBaseIndex(LR->end);
2157        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2158        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2159        assert(UseIdx != -1);
2160        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2161          LastUse->getOperand(UseIdx).setIsKill();
2162          vrm.addKillPoint(LI->reg, LastUseIdx);
2163        }
2164      }
2165      RetNewLIs.push_back(LI);
2166    }
2167  }
2168
2169  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2170  return RetNewLIs;
2171}
2172
2173/// hasAllocatableSuperReg - Return true if the specified physical register has
2174/// any super register that's allocatable.
2175bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2176  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2177    if (allocatableRegs_[*AS] && hasInterval(*AS))
2178      return true;
2179  return false;
2180}
2181
2182/// getRepresentativeReg - Find the largest super register of the specified
2183/// physical register.
2184unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2185  // Find the largest super-register that is allocatable.
2186  unsigned BestReg = Reg;
2187  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2188    unsigned SuperReg = *AS;
2189    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2190      BestReg = SuperReg;
2191      break;
2192    }
2193  }
2194  return BestReg;
2195}
2196
2197/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2198/// specified interval that conflict with the specified physical register.
2199unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2200                                                   unsigned PhysReg) const {
2201  unsigned NumConflicts = 0;
2202  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2203  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2204         E = mri_->reg_end(); I != E; ++I) {
2205    MachineOperand &O = I.getOperand();
2206    MachineInstr *MI = O.getParent();
2207    unsigned Index = getInstructionIndex(MI);
2208    if (pli.liveAt(Index))
2209      ++NumConflicts;
2210  }
2211  return NumConflicts;
2212}
2213
2214/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2215/// around all defs and uses of the specified interval.
2216void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2217                                            unsigned PhysReg, VirtRegMap &vrm) {
2218  unsigned SpillReg = getRepresentativeReg(PhysReg);
2219
2220  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2221    // If there are registers which alias PhysReg but are not sub-registers
2222    // of the chosen representative super register, assert, since we can't
2223    // handle that case yet.
2224    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2225           tri_->isSuperRegister(*AS, SpillReg));
2226
2227  LiveInterval &pli = getInterval(SpillReg);
2228  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2229  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2230         E = mri_->reg_end(); I != E; ++I) {
2231    MachineOperand &O = I.getOperand();
2232    MachineInstr *MI = O.getParent();
2233    if (SeenMIs.count(MI))
2234      continue;
2235    SeenMIs.insert(MI);
2236    unsigned Index = getInstructionIndex(MI);
2237    if (pli.liveAt(Index)) {
2238      vrm.addEmergencySpill(SpillReg, MI);
2239      unsigned StartIdx = getLoadIndex(Index);
2240      unsigned EndIdx = getStoreIndex(Index)+1;
2241      if (pli.isInOneLiveRange(StartIdx, EndIdx))
2242        pli.removeRange(StartIdx, EndIdx);
2243      else {
2244        cerr << "Ran out of registers during register allocation!\n";
2245        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2246          cerr << "Please check your inline asm statement for invalid "
2247               << "constraints:\n";
2248          MI->print(cerr.stream(), tm_);
2249        }
2250        exit(1);
2251      }
2252      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2253        if (!hasInterval(*AS))
2254          continue;
2255        LiveInterval &spli = getInterval(*AS);
2256        if (spli.liveAt(Index))
2257          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2258      }
2259    }
2260  }
2261}
2262
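/// addLiveRangeToEndOfBlock - Extend the given register's live interval from
/// startInst's def slot to the end of startInst's basic block.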
2263LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2264                                                   MachineInstr* startInst) {
2265  LiveInterval& Interval = getOrCreateInterval(reg);
2266  VNInfo* VN = Interval.getNextValue(
2267            getInstructionIndex(startInst) + InstrSlots::DEF,
2268            startInst, getVNInfoAllocator());
2269  VN->hasPHIKill = true;
2270  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2271  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2272               getMBBEndIdx(startInst->getParent()) + 1, VN);
2273  Interval.addRange(LR);
2274
2275  return LR;
2276}
2277