LiveIntervalAnalysis.cpp revision 909ab8b41a1f6072d24af951b70511d8a586d8ac
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
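// With DEBUG_TYPE set, the DOUT / DEBUG statements below can be enabled
// selectively from the command line (e.g. llc -debug-only=liveintervals,
// assuming an assertions-enabled build).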
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

namespace {
  // Hidden options to help debugging.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}
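// A sketch of how these hidden flags would be passed on the command line
// (flag names are taken from the cl::opt declarations above; cl::Hidden only
// hides them from -help output, it does not disable them):
//
//   llc -disable-rematerialization foo.bc   (turn off rematerialization)
//   llc -split-limit=8 foo.bc               (allow at most 8 interval splits)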

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
namespace {
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
    delete ClonedMIs[i];
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

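  // Each instruction gets InstrSlots::NUM consecutive index values. A sketch
  // of the sub-slot layout this file depends on (assuming the usual scheme
  // behind getLoadIndex / getUseIndex / getDefIndex / getStoreIndex, with
  // InstrSlots::NUM == 4):
  //
  //   base+0: load slot    base+1: use slot
  //   base+2: def slot     base+3: store slot
  //
  // so the first two instructions get base indexes 0 and 4, and the def
  // slot of the second instruction is index 6.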
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
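  // Idx2MBBMap stays sorted by block start index so that queries such as
  // findLiveInMBBs and intervalIsInOneMBB below can binary-search it.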

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified physical register
/// is defined anywhere during the lifetime of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
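    // For example (a sketch; the vreg number is hypothetical):
    //
    //   %reg1024 = ...                   (first def: value #0)
    //   %reg1024 = add %reg1024, 1       (two-addr redef of the same vreg)
    //
    // The redef forces the single live range computed above to be split
    // into two value numbers, which is what the code below does.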
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing
  // instruction or an instruction marking it dead is if it is live-in to
  // the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      unsigned reg) {
  if (TargetRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the value reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
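  // Idx2MBBMap is sorted by block start index (see runOnMachineFunction), so
  // binary-search for the first block that might contain LR.start, then walk
  // forward while successive blocks still begin before LR.end.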
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval LiveIntervals::createInterval(unsigned reg) {
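  // Physical register intervals get an infinite spill weight so the
  // allocator never tries to spill them; virtual register intervals start
  // at zero and accumulate weight as defs and uses are found (see the
  // getSpillWeight calls below).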
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses implicitly use
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
    break;
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}

/// isReMaterializable - Returns true if the definition of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot or a rematerialized load into the given operands of the
/// specified MI. If it is successful, MI is updated with the newly created
/// instruction and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload and a spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
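  // A successful fold below can replace MI with a completely different
  // instruction, in which case the operand scan restarts from the label
  // below.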
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
      VNI = *i;
      break;
    }
  return VNI;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
struct RewriteInfo {
  unsigned Index;
  MachineInstr *MI;
  bool HasUse;
  bool HasDef;
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
};

struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
  }
};

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
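  // Rounding start down and end up to whole-instruction base indexes ensures
  // the scan below visits every instruction whose slots overlap
  // [I->start, I->end).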

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // There are use(s) after the last def; it's not safe to fold the
        // spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
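    // (A rough sketch of the heuristic: each def / use contributes weight
    // scaled exponentially by loop depth, so spilling inside hot loops is
    // strongly discouraged; see getSpillWeight for the actual formula.)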
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// removeSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled.
void LiveIntervals::removeSpilledImpDefs(const LiveInterval &li,
                                         VirtRegMap &vrm) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF)
      continue;
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }
}


std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available.
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    removeSpilledImpDefs(li, vrm);
    return NewLIs;
  }

1422  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1423  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1424    TrySplit = false;
1425  if (TrySplit)
1426    ++numSplits;
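  // Note: SplitLimit is a hidden debugging option; a value of -1 means no
  // limit, otherwise no further splitting is attempted once numSplits
  // reaches the limit.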
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized, so
        // the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // All pieces of the original interval share one stack slot.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Spills / restores are inserted only if we are splitting; otherwise the
  // rewrite above is all that is needed.
  if (!TrySplit) {
    removeSpilledImpDefs(li, vrm);
    return NewLIs;
  }
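
  // We are splitting: for each block that needs a spill or restore, decide
  // whether the memory access can be folded into an existing instruction,
  // and hand anything that cannot be folded to the spiller.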
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

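  // Handle the restore points similarly: fold the reload (or the
  // rematerialized def, when it is itself a load) into the using instruction
  // where possible; otherwise record a restore point for the spiller.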
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue; // Cancelled by eraseRestoreInfo during store folding above.
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  removeSpilledImpDefs(li, vrm);
  return RetNewLIs;
}
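
// A minimal usage sketch (hypothetical caller state, in the style of the
// linear scan allocator): spill a chosen interval and feed the resulting
// short intervals back into the allocation queue.
//
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, loopInfo, *vrm);
//   for (unsigned i = 0, e = Added.size(); i != e; ++i)
//     unhandled.push(Added[i]);  // 'unhandled' is the allocator's worklist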

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that is allocatable and currently has a live
/// interval.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
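
// For example, on x86 a query for AL walks its super-registers (AX, EAX);
// if EAX is allocatable and has an interval, this returns true.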

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
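
// E.g. on 32-bit x86, getRepresentativeReg(AL) would typically return EAX:
// AX is rejected because EAX is an allocatable super-register of it, while
// EAX itself has no super-register and so is chosen if it has an interval.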

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
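
// This is a cheap interference estimate: a caller deciding whether to evict
// PhysReg can compare conflict counts between candidates without computing
// full interference information.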

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
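
// Note: the emergency spill points recorded here are expected to be
// materialized by the spiller, which saves SpillReg before the conflicting
// instruction and restores it afterwards; clearing the ranges above keeps
// the intervals consistent with that rewrite.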