LiveIntervalAnalysis.cpp revision 3c75ba858b4e2070993cc1241ba74ead17f647d6
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

namespace {
  // Hidden options to help debugging.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
namespace {
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
    delete ClonedMIs[i];
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

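  // Each instruction is allotted InstrSlots::NUM consecutive index values,
  // leaving room for distinct load / use / def / store slots within one
  // instruction. A rough sketch of the layout assumed here (see the
  // InstrSlots enum in LiveIntervalAnalysis.h): with NUM == 4, instruction i
  // owns indexes [i*4, i*4+3], and getUseIndex / getDefIndex return the use
  // and def slots relative to an instruction's base index.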
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register is
/// referenced by an instruction during the lifetime of the specified
/// interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

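/// handleVirtualRegisterDef - Update the live interval for a def of the given
/// virtual register at index MIIdx of instruction mi. This handles both the
/// first def of a vreg and redefinitions created by two-address or phi
/// elimination.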
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

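/// handlePhysicalRegisterDef - Update the live interval for a def of the
/// given physical register. Since a physreg cannot live across blocks, the
/// interval ends within the defining block: at a kill / redef, or just past
/// the def slot if the def turns out to be dead.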
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should reach here without a killing instruction, or an
  // instruction where we know the register is dead, is if it is live-in to
  // the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

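/// handleRegisterDef - Dispatch a register def to the virtual or physical
/// register handler; a def of an allocatable physical register also defines
/// its sub-registers.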
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      unsigned reg) {
  if (TargetRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
  }
}

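/// handleLiveInRegister - Create a live range for a register that is live
/// into MBB without being defined there (e.g. an incoming argument register,
/// or an alias of one).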
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual and physical
/// registers. For some numbering of the machine instructions [1, N], a live
/// interval is an interval [i, j) where 1 <= i <= j < N and the register is
/// live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}

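/// findLiveInMBBs - Collect the basic blocks whose start index lies inside
/// the given live range, i.e. the blocks the range is live into. Returns
/// true if any were found.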
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


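/// createInterval - Allocate a fresh live interval for the given register.
/// Physical register intervals get an infinite spill weight so the allocator
/// never tries to spill them.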
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
    break;
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill or a restore from
/// or to a stack slot, or a rematerialized load, into the specified operands
/// of MI. If successful, MI is updated with the newly created MI and this
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// the interval by to-be re-materialized operands of MI) to use NewVReg.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
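
/// anyKillInMBBAfterIdx - Return true if the given val# is killed in MBB at
/// some index after Idx.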
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

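/// findDefinedVNInfo - Return the val# of li that is defined at the given
/// index, or NULL if none is.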
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
      VNI = *i;
      break;
    }
  return VNI;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
struct RewriteInfo {
  unsigned Index;
  MachineInstr *MI;
  bool HasUse;
  bool HasDef;
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
};

struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
  }
};

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &(*ri);
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

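/// alsoFoldARestore - Return true if a restore is recorded at the given
/// index for register vr in block Id and it is marked foldable.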
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

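/// eraseRestoreInfo - Invalidate any restore recorded at the given index for
/// register vr in block Id.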
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}


1311std::vector<LiveInterval*> LiveIntervals::
1312addIntervalsForSpills(const LiveInterval &li,
1313                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1314  // Since this is called after the analysis is done we don't know if
1315  // LiveVariables is available
1316  lv_ = getAnalysisToUpdate<LiveVariables>();
1317
1318  assert(li.weight != HUGE_VALF &&
1319         "attempt to spill already spilled interval!");
1320
1321  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1322  li.print(DOUT, tri_);
1323  DOUT << '\n';
1324
1325  // Each bit specify whether it a spill is required in the MBB.
1326  BitVector SpillMBBs(mf_->getNumBlockIDs());
1327  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1328  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1329  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1330  std::map<unsigned,unsigned> MBBVRegsMap;
1331  std::vector<LiveInterval*> NewLIs;
1332  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1333
1334  unsigned NumValNums = li.getNumValNums();
1335  SmallVector<MachineInstr*, 4> ReMatDefs;
1336  ReMatDefs.resize(NumValNums, NULL);
1337  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1338  ReMatOrigDefs.resize(NumValNums, NULL);
1339  SmallVector<int, 4> ReMatIds;
1340  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1341  BitVector ReMatDelete(NumValNums);
1342  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1343
1344  // Spilling a split live interval. It cannot be split any further. Also,
1345  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    return NewLIs;
  }

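  // Decide whether to try splitting the interval at basic block boundaries
  // rather than spilling it outright: only worthwhile when the interval
  // spans more than one MBB, and capped by the -split-limit option.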
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
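  // Scan the val#s to see which defs can be rematerialized. A def index of
  // ~1U marks a dead val#; ~0U means there is no defining instruction to
  // point at, so the value cannot be rematerialized and needs the stack.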
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified, so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
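  // An interval produced by splitting reuses its parent's stack slot (the
  // pre-split path above returned early with the parent's slot), so a fresh
  // slot is only allocated for original intervals.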
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit)
    return NewLIs;

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
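  // AddedKill tracks new intervals whose last use has already been marked a
  // kill at a spill point; Ops collects the operand indices of the current
  // MI that refer to the register being spilled, for the folding code below.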
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

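  // Now insert the restores. For each recorded restore point, first try to
  // fold the reload (or the rematerialized def, when it is itself a load)
  // directly into the using instruction; only if that fails is the spiller
  // asked to emit an explicit load / rematerialization before the use.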
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  return RetNewLIs;
}

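// A hypothetical caller, sketching how a register allocator's spiller might
// consume the intervals returned above (not code from this file): the new,
// short intervals are handed back to the allocator so each can be assigned
// a register of its own, e.g.
//
//   std::vector<LiveInterval*> Added =
//       lis->addIntervalsForSpills(*cur, loopInfo, *vrm);
//   for (unsigned i = 0, e = Added.size(); i != e; ++i)
//     unhandled.push(Added[i]);   // hypothetical allocator worklist
//
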
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Pick the topmost super-register that has an interval: one with no
  // allocatable super-register of its own.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If a register aliases PhysReg but is not a sub-register of the chosen
    // representative super-register, we can't handle it yet; assert.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

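  // Walk every use/def of the virtual register. Wherever the chosen physical
  // register is live at that instruction, record an emergency spill and carve
  // the point out of the physical register's interval, as well as out of any
  // sub-register intervals that are live there.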
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
