LiveIntervalAnalysis.cpp revision 44d37357591b0845b0cbf26e0aaadc1a7bffeaea
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
// Hidden options for help debugging.

// When set, isReMaterializable() always answers false, disabling
// rematerialization entirely (debugging aid).
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Interval-splitting knobs. NOTE(review): not referenced in this chunk —
// presumably consumed by the splitting code elsewhere in this file; confirm.
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
// Maximum number of splits to perform; -1 means no limit.
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification and registration with the PassManager.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
55
56void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57  AU.addPreserved<LiveVariables>();
58  AU.addRequired<LiveVariables>();
59  AU.addPreservedID(MachineLoopInfoID);
60  AU.addPreservedID(MachineDominatorsID);
61  AU.addPreservedID(PHIEliminationID);
62  AU.addRequiredID(PHIEliminationID);
63  AU.addRequiredID(TwoAddressInstructionPassID);
64  MachineFunctionPass::getAnalysisUsage(AU);
65}
66
67void LiveIntervals::releaseMemory() {
68  Idx2MBBMap.clear();
69  mi2iMap_.clear();
70  i2miMap_.clear();
71  r2iMap_.clear();
72  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
73  VNInfoAllocator.Reset();
74  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
75    delete ClonedMIs[i];
76}
77
/// computeNumbering - Assign a dense index to every MachineInstr and record
/// per-block index ranges.  If a numbering already existed, remap all
/// existing live-range endpoints from the old index space to the new one.
void LiveIntervals::computeNumbering() {
  // Keep the old index -> instruction table so interval endpoints
  // (expressed in old indexes) can be translated below.
  Index2MiMap OldI2MI = i2miMap_;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinal.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  // Each instruction gets InstrSlots::NUM consecutive slots, so the
  // sub-position within an instruction is index % InstrSlots::NUM.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
      ? std::make_pair(StartIdx, StartIdx)  // Empty MBB
      : std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Renumbering case: translate every range endpoint, value-number def,
  // and kill slot into the new index space, preserving the intra-slot
  // offset (index % InstrSlots::NUM).
  if (!OldI2MI.empty())
    for (iterator I = begin(), E = end(); I != E; ++I)
      for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
           LI != LE; ++LI) {
        unsigned offset = LI->start % InstrSlots::NUM;
        LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;

        if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
          // FIXME: Not correct when the instruction at LI->end has
          // been removed
          offset = LI->end % InstrSlots::NUM;
          LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
        } else {
          // Endpoint was past the last old instruction; clamp to the end
          // of the new numbering.
          LI->end = i2miMap_.size() * InstrSlots::NUM;
        }

        VNInfo* vni = LI->valno;
        offset = vni->def % InstrSlots::NUM;
        vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;

        for (size_t i = 0; i < vni->kills.size(); ++i) {
          offset = vni->kills[i] % InstrSlots::NUM;
          vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
                          offset;
        }
      }
}
138
139/// runOnMachineFunction - Register allocate the whole function
140///
141bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
142  mf_ = &fn;
143  mri_ = &mf_->getRegInfo();
144  tm_ = &fn.getTarget();
145  tri_ = tm_->getRegisterInfo();
146  tii_ = tm_->getInstrInfo();
147  lv_ = &getAnalysis<LiveVariables>();
148  allocatableRegs_ = tri_->getAllocatableSet(fn);
149
150  computeNumbering();
151  computeIntervals();
152
153  numIntervals += getNumIntervals();
154
155  DOUT << "********** INTERVALS **********\n";
156  for (iterator I = begin(), E = end(); I != E; ++I) {
157    I->second.print(DOUT, tri_);
158    DOUT << "\n";
159  }
160
161  numIntervalsAfter += getNumIntervals();
162  DEBUG(dump());
163  return true;
164}
165
166/// print - Implement the dump method.
167void LiveIntervals::print(std::ostream &O, const Module* ) const {
168  O << "********** INTERVALS **********\n";
169  for (const_iterator I = begin(), E = end(); I != E; ++I) {
170    I->second.print(DOUT, tri_);
171    DOUT << "\n";
172  }
173
174  O << "********** MACHINEINSTRS **********\n";
175  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
176       mbbi != mbbe; ++mbbi) {
177    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
178    for (MachineBasicBlock::iterator mii = mbbi->begin(),
179           mie = mbbi->end(); mii != mie; ++mii) {
180      O << getInstructionIndex(mii) << '\t' << *mii;
181    }
182  }
183}
184
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
/// NOTE(review): the scan below actually looks at every register operand,
/// not only defs — confirm whether uses are intentionally included.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by each of li's live ranges.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // A copy to or from li.reg itself does not count as a conflict.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Translate virtual register operands to their assigned physreg,
        // skipping vregs that have no assignment yet.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Overlap includes aliases/sub-registers of 'reg'.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
224
225void LiveIntervals::printRegName(unsigned reg) const {
226  if (TargetRegisterInfo::isPhysicalRegister(reg))
227    cerr << tri_->getName(reg);
228  else
229    cerr << "%reg" << reg;
230}
231
/// handleVirtualRegisterDef - Update the live interval for a virtual
/// register defined by instruction 'mi' (at index MIIdx) in block 'mbb'.
/// The first def seen builds the entire interval from LiveVariables'
/// kill/alive-block information; later defs (from two-address rewriting or
/// phi elimination) split or extend the existing interval.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // IMPLICIT_DEF does not produce a real value; no range is added for it.
  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record the defining copy (subreg extract/insert or plain move) in the
    // value number so later phases can trace its source.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Defining instruction is its own kill: dead def, unit-length range.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // As above, record copies in the new value number.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
427
/// handlePhysicalRegisterDef - Add a live range for a physical register def
/// at index MIIdx.  CopyMI, if non-null, is the copy instruction producing
/// the value and is recorded in a fresh value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  // Reuse the value number of an overlapping range (e.g. from a
  // sub-register def of the same instruction) instead of creating one.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
488
489void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
490                                      MachineBasicBlock::iterator MI,
491                                      unsigned MIIdx,
492                                      unsigned reg) {
493  if (TargetRegisterInfo::isVirtualRegister(reg))
494    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
495  else if (allocatableRegs_[reg]) {
496    MachineInstr *CopyMI = NULL;
497    unsigned SrcReg, DstReg;
498    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
499        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
500        tii_->isMoveInstr(*MI, SrcReg, DstReg))
501      CopyMI = MI;
502    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
503    // Def of a register also defines its sub-registers.
504    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
505      // If MI also modifies the sub-register explicitly, avoid processing it
506      // more than once. Do not pass in TRI here so it checks for exact match.
507      if (!MI->modifiesRegister(*AS))
508        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
509  }
510}
511
/// handleLiveInRegister - Build the range for a register that is live into
/// MBB, starting at index MIIdx (the block start).  If isAlias is set, the
/// register is only live-in as an alias (sub-register) and is treated as
/// dead when no use is found in the block.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  // (end still equals the block start index iff no kill/redef was found.)
  if (end == MIIdx) {
    if (isAlias) {
      // Unused alias live-in: give it a minimal unit-length dead range.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Live through the block: extend to one slot past the last
      // instruction scanned above.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
559
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Sub-registers get an interval only if one does not already exist,
      // and are flagged as alias live-ins (isAlias = true).
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // NOTE(review): operands are walked in reverse order here — presumably
      // so implicit defs at the end of the operand list are seen first;
      // confirm whether the order is required.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      // Each instruction occupies InstrSlots::NUM index slots.
      MIIndex += InstrSlots::NUM;
    }
  }
}
603
604bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
605                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
606  std::vector<IdxMBBPair>::const_iterator I =
607    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
608
609  bool ResVal = false;
610  while (I != Idx2MBBMap.end()) {
611    if (LR.end <= I->first)
612      break;
613    MBBs.push_back(I->second);
614    ResVal = true;
615    ++I;
616  }
617  return ResVal;
618}
619
620
621LiveInterval LiveIntervals::createInterval(unsigned reg) {
622  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
623                       HUGE_VALF : 0.0F;
624  return LiveInterval(reg, Weight);
625}
626
627/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
628/// copy field and returns the source register that defines it.
629unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
630  if (!VNI->copy)
631    return 0;
632
633  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
634    return VNI->copy->getOperand(1).getReg();
635  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
636    return VNI->copy->getOperand(2).getReg();
637  unsigned SrcReg, DstReg;
638  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
639    return SrcReg;
640  assert(0 && "Unrecognized copy instruction!");
641  return 0;
642}
643
644//===----------------------------------------------------------------------===//
645// Register allocator hooks.
646//
647
648/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
649/// allow one) virtual register operand, then its uses are implicitly using
650/// the register. Returns the virtual register.
651unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
652                                            MachineInstr *MI) const {
653  unsigned RegOp = 0;
654  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
655    MachineOperand &MO = MI->getOperand(i);
656    if (!MO.isRegister() || !MO.isUse())
657      continue;
658    unsigned Reg = MO.getReg();
659    if (Reg == 0 || Reg == li.reg)
660      continue;
661    // FIXME: For now, only remat MI with at most one register operand.
662    assert(!RegOp &&
663           "Can't rematerialize instruction with multiple register operand!");
664    RegOp = MO.getReg();
665    break;
666  }
667  return RegOp;
668}
669
670/// isValNoAvailableAt - Return true if the val# of the specified interval
671/// which reaches the given instruction also reaches the specified use index.
672bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
673                                       unsigned UseIdx) const {
674  unsigned Index = getInstructionIndex(MI);
675  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
676  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
677  return UI != li.end() && UI->valno == ValNo;
678}
679
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  isLoad is set when
/// the definition is a simple load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Debugging switch: -disable-rematerialization forces a "no" answer.
  if (DisableReMat)
    return false;

  isLoad = false;
  // IMPLICIT_DEF is always trivially rematerializable.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    // If the def reads another register, remat is only legal when that
    // register's value is still available at every use reached by this val#.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Skip uses that are reached by some other value number.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
723
724/// isReMaterializable - Returns true if every definition of MI of every
725/// val# of the specified interval is re-materializable.
726bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
727  isLoad = false;
728  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
729       i != e; ++i) {
730    const VNInfo *VNI = *i;
731    unsigned DefIdx = VNI->def;
732    if (DefIdx == ~1U)
733      continue; // Dead val#.
734    // Is the def for the val# rematerializable?
735    if (DefIdx == ~0u)
736      return false;
737    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
738    bool DefIsLoad = false;
739    if (!ReMatDefMI ||
740        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
741      return false;
742    isLoad |= DefIsLoad;
743  }
744  return true;
745}
746
747/// FilterFoldedOps - Filter out two-address use operands. Return
748/// true if it finds any issue with the operands that ought to prevent
749/// folding.
750static bool FilterFoldedOps(MachineInstr *MI,
751                            SmallVector<unsigned, 2> &Ops,
752                            unsigned &MRInfo,
753                            SmallVector<unsigned, 2> &FoldOps) {
754  const TargetInstrDesc &TID = MI->getDesc();
755
756  MRInfo = 0;
757  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
758    unsigned OpIdx = Ops[i];
759    MachineOperand &MO = MI->getOperand(OpIdx);
760    // FIXME: fold subreg use.
761    if (MO.getSubReg())
762      return true;
763    if (MO.isDef())
764      MRInfo |= (unsigned)VirtRegMap::isMod;
765    else {
766      // Filter out two-address use operand(s).
767      if (!MO.isImplicit() &&
768          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
769        MRInfo = VirtRegMap::isModRef;
770        continue;
771      }
772      MRInfo |= (unsigned)VirtRegMap::isRef;
773    }
774    FoldOps.push_back(OpIdx);
775  }
776  return false;
777}
778
779
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot (isSS) or the
  // rematerialized def instruction into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Rewire the index maps so the folded instruction takes over MI's slot.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    // Replace MI with fmi in its block and hand the new pointer back.
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
837
838/// canFoldMemoryOperand - Returns true if the specified load / store
839/// folding is possible.
840bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
841                                         SmallVector<unsigned, 2> &Ops,
842                                         bool ReMat) const {
843  // Filter the list of operand indexes that are to be folded. Abort if
844  // any operand will prevent folding.
845  unsigned MRInfo = 0;
846  SmallVector<unsigned, 2> FoldOps;
847  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
848    return false;
849
850  // It's only legal to remat for a use, not a def.
851  if (ReMat && (MRInfo & VirtRegMap::isMod))
852    return false;
853
854  return tii_->canFoldMemoryOperand(MI, FoldOps);
855}
856
857bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
858  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
859  for (LiveInterval::Ranges::const_iterator
860         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
861    std::vector<IdxMBBPair>::const_iterator II =
862      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
863    if (II == Idx2MBBMap.end())
864      continue;
865    if (I->end > II->first)  // crossing a MBB.
866      return false;
867    MBBs.insert(II->second);
868    if (MBBs.size() > 1)
869      return false;
870  }
871  return true;
872}
873
874/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
875/// interval on to-be re-materialized operands of MI) with new register.
876void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
877                                       MachineInstr *MI, unsigned NewVReg,
878                                       VirtRegMap &vrm) {
879  // There is an implicit use. That means one of the other operand is
880  // being remat'ed and the remat'ed instruction has li.reg as an
881  // use operand. Make sure we rewrite that as well.
882  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
883    MachineOperand &MO = MI->getOperand(i);
884    if (!MO.isRegister())
885      continue;
886    unsigned Reg = MO.getReg();
887    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
888      continue;
889    if (!vrm.isReMaterialized(Reg))
890      continue;
891    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
892    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
893    if (UseMO)
894      UseMO->setReg(NewVReg);
895  }
896}
897
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites each operand of MI that references li.reg to use NewVReg
/// (created on demand and returned by reference), first attempting to fold
/// the reload / remat'ed load into MI.  HasDef / HasUse report which operand
/// kinds were rewritten.  Returns true if all rewritten defs / uses could be
/// folded (CanFold).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  // Note: the operand count is re-read each iteration because folding may
  // replace MI with a different instruction (see the goto below).
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    // Collect every later operand that references the same register.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the reload covers load-index to just past the use.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1089bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1090                                   const VNInfo *VNI,
1091                                   MachineBasicBlock *MBB, unsigned Idx) const {
1092  unsigned End = getMBBEndIdx(MBB);
1093  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1094    unsigned KillIdx = VNI->kills[j];
1095    if (KillIdx > Idx && KillIdx < End)
1096      return true;
1097  }
1098  return false;
1099}
1100
1101static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1102  const VNInfo *VNI = NULL;
1103  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1104         e = li.vni_end(); i != e; ++i)
1105    if ((*i)->def == DefIdx) {
1106      VNI = *i;
1107      break;
1108    }
1109  return VNI;
1110}
1111
1112/// RewriteInfo - Keep track of machine instrs that will be rewritten
1113/// during spilling.
1114namespace {
1115  struct RewriteInfo {
1116    unsigned Index;
1117    MachineInstr *MI;
1118    bool HasUse;
1119    bool HasDef;
1120    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1121      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1122  };
1123
1124  struct RewriteInfoCompare {
1125    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1126      return LHS.Index < RHS.Index;
1127    }
1128  };
1129}
1130
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg within
/// the live range I.  Collects the instructions to rewrite sorted by index,
/// rewrites each through rewriteInstructionForSpills, and records per-MBB
/// spill (SpillMBBs / SpillIdxes) and restore (RestoreMBBs / RestoreIdxes)
/// points for the splitting heuristics in addIntervalsForSpills.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before any rewrite can invalidate the current use entry.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Merge them into one record per instruction.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the split vreg already chosen for this MBB.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing was rewritten (e.g. the remat def was deleted) - nothing to
    // track for this instruction.
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this MBB; remember a spill point after the def.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1333
1334bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1335                        BitVector &RestoreMBBs,
1336                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1337  if (!RestoreMBBs[Id])
1338    return false;
1339  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1340  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1341    if (Restores[i].index == index &&
1342        Restores[i].vreg == vr &&
1343        Restores[i].canFold)
1344      return true;
1345  return false;
1346}
1347
1348void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1349                        BitVector &RestoreMBBs,
1350                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1351  if (!RestoreMBBs[Id])
1352    return;
1353  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1354  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1355    if (Restores[i].index == index && Restores[i].vreg)
1356      Restores[i].index = -1;
1357}
1358
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance first: erasing MI or rewriting the operand below would
    // invalidate the current use-list position.
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that still names li.reg.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
1392
1393
1394std::vector<LiveInterval*> LiveIntervals::
1395addIntervalsForSpills(const LiveInterval &li,
1396                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1397  // Since this is called after the analysis is done we don't know if
1398  // LiveVariables is available
1399  lv_ = getAnalysisToUpdate<LiveVariables>();
1400
1401  assert(li.weight != HUGE_VALF &&
1402         "attempt to spill already spilled interval!");
1403
1404  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1405  li.print(DOUT, tri_);
1406  DOUT << '\n';
1407
1408  // Each bit specify whether it a spill is required in the MBB.
1409  BitVector SpillMBBs(mf_->getNumBlockIDs());
1410  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1411  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1412  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1413  std::map<unsigned,unsigned> MBBVRegsMap;
1414  std::vector<LiveInterval*> NewLIs;
1415  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1416
1417  unsigned NumValNums = li.getNumValNums();
1418  SmallVector<MachineInstr*, 4> ReMatDefs;
1419  ReMatDefs.resize(NumValNums, NULL);
1420  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1421  ReMatOrigDefs.resize(NumValNums, NULL);
1422  SmallVector<int, 4> ReMatIds;
1423  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1424  BitVector ReMatDelete(NumValNums);
1425  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1426
1427  // Spilling a split live interval. It cannot be split any further. Also,
1428  // it's also guaranteed to be a single val# / range interval.
1429  if (vrm.getPreSplitReg(li.reg)) {
1430    vrm.setIsSplitFromReg(li.reg, 0);
1431    // Unset the split kill marker on the last use.
1432    unsigned KillIdx = vrm.getKillPoint(li.reg);
1433    if (KillIdx) {
1434      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1435      assert(KillMI && "Last use disappeared?");
1436      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1437      assert(KillOp != -1 && "Last use disappeared?");
1438      KillMI->getOperand(KillOp).setIsKill(false);
1439    }
1440    vrm.removeKillPoint(li.reg);
1441    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1442    Slot = vrm.getStackSlot(li.reg);
1443    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1444    MachineInstr *ReMatDefMI = DefIsReMat ?
1445      vrm.getReMaterializedMI(li.reg) : NULL;
1446    int LdSlot = 0;
1447    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1448    bool isLoad = isLoadSS ||
1449      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1450    bool IsFirstRange = true;
1451    for (LiveInterval::Ranges::const_iterator
1452           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1453      // If this is a split live interval with multiple ranges, it means there
1454      // are two-address instructions that re-defined the value. Only the
1455      // first def can be rematerialized!
1456      if (IsFirstRange) {
1457        // Note ReMatOrigDefMI has already been deleted.
1458        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1459                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1460                             false, vrm, rc, ReMatIds, loopInfo,
1461                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1462                             MBBVRegsMap, NewLIs);
1463      } else {
1464        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1465                             Slot, 0, false, false, false,
1466                             false, vrm, rc, ReMatIds, loopInfo,
1467                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1468                             MBBVRegsMap, NewLIs);
1469      }
1470      IsFirstRange = false;
1471    }
1472
1473    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1474    return NewLIs;
1475  }
1476
1477  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1478  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1479    TrySplit = false;
1480  if (TrySplit)
1481    ++numSplits;
1482  bool NeedStackSlot = false;
1483  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1484       i != e; ++i) {
1485    const VNInfo *VNI = *i;
1486    unsigned VN = VNI->id;
1487    unsigned DefIdx = VNI->def;
1488    if (DefIdx == ~1U)
1489      continue; // Dead val#.
1490    // Is the def for the val# rematerializable?
1491    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1492      ? 0 : getInstructionFromIndex(DefIdx);
1493    bool dummy;
1494    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1495      // Remember how to remat the def of this val#.
1496      ReMatOrigDefs[VN] = ReMatDefMI;
1497      // Original def may be modified so we have to make a copy here. vrm must
1498      // delete these!
1499      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1500
1501      bool CanDelete = true;
1502      if (VNI->hasPHIKill) {
1503        // A kill is a phi node, not all of its uses can be rematerialized.
1504        // It must not be deleted.
1505        CanDelete = false;
1506        // Need a stack slot if there is any live range where uses cannot be
1507        // rematerialized.
1508        NeedStackSlot = true;
1509      }
1510      if (CanDelete)
1511        ReMatDelete.set(VN);
1512    } else {
1513      // Need a stack slot if there is any live range where uses cannot be
1514      // rematerialized.
1515      NeedStackSlot = true;
1516    }
1517  }
1518
1519  // One stack slot per live interval.
1520  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1521    Slot = vrm.assignVirt2StackSlot(li.reg);
1522
1523  // Create new intervals and rewrite defs and uses.
1524  for (LiveInterval::Ranges::const_iterator
1525         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1526    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1527    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1528    bool DefIsReMat = ReMatDefMI != NULL;
1529    bool CanDelete = ReMatDelete[I->valno->id];
1530    int LdSlot = 0;
1531    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1532    bool isLoad = isLoadSS ||
1533      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1534    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1535                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1536                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1537                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1538                               MBBVRegsMap, NewLIs);
1539  }
1540
1541  // Insert spills / restores if we are splitting.
1542  if (!TrySplit) {
1543    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1544    return NewLIs;
1545  }
1546
1547  SmallPtrSet<LiveInterval*, 4> AddedKill;
1548  SmallVector<unsigned, 2> Ops;
1549  if (NeedStackSlot) {
1550    int Id = SpillMBBs.find_first();
1551    while (Id != -1) {
1552      std::vector<SRInfo> &spills = SpillIdxes[Id];
1553      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1554        int index = spills[i].index;
1555        unsigned VReg = spills[i].vreg;
1556        LiveInterval &nI = getOrCreateInterval(VReg);
1557        bool isReMat = vrm.isReMaterialized(VReg);
1558        MachineInstr *MI = getInstructionFromIndex(index);
1559        bool CanFold = false;
1560        bool FoundUse = false;
1561        Ops.clear();
1562        if (spills[i].canFold) {
1563          CanFold = true;
1564          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1565            MachineOperand &MO = MI->getOperand(j);
1566            if (!MO.isRegister() || MO.getReg() != VReg)
1567              continue;
1568
1569            Ops.push_back(j);
1570            if (MO.isDef())
1571              continue;
1572            if (isReMat ||
1573                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1574                                                RestoreMBBs, RestoreIdxes))) {
1575              // MI has two-address uses of the same register. If the use
1576              // isn't the first and only use in the BB, then we can't fold
1577              // it. FIXME: Move this to rewriteInstructionsForSpills.
1578              CanFold = false;
1579              break;
1580            }
1581            FoundUse = true;
1582          }
1583        }
1584        // Fold the store into the def if possible.
1585        bool Folded = false;
1586        if (CanFold && !Ops.empty()) {
1587          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1588            Folded = true;
1589            if (FoundUse > 0) {
1590              // Also folded uses, do not issue a load.
1591              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1592              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1593            }
1594            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1595          }
1596        }
1597
1598        // Otherwise tell the spiller to issue a spill.
1599        if (!Folded) {
1600          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1601          bool isKill = LR->end == getStoreIndex(index);
1602          if (!MI->registerDefIsDead(nI.reg))
1603            // No need to spill a dead def.
1604            vrm.addSpillPoint(VReg, isKill, MI);
1605          if (isKill)
1606            AddedKill.insert(&nI);
1607        }
1608      }
1609      Id = SpillMBBs.find_next(Id);
1610    }
1611  }
1612
1613  int Id = RestoreMBBs.find_first();
1614  while (Id != -1) {
1615    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1616    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1617      int index = restores[i].index;
1618      if (index == -1)
1619        continue;
1620      unsigned VReg = restores[i].vreg;
1621      LiveInterval &nI = getOrCreateInterval(VReg);
1622      MachineInstr *MI = getInstructionFromIndex(index);
1623      bool CanFold = false;
1624      Ops.clear();
1625      if (restores[i].canFold) {
1626        CanFold = true;
1627        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1628          MachineOperand &MO = MI->getOperand(j);
1629          if (!MO.isRegister() || MO.getReg() != VReg)
1630            continue;
1631
1632          if (MO.isDef()) {
1633            // If this restore were to be folded, it would have been folded
1634            // already.
1635            CanFold = false;
1636            break;
1637          }
1638          Ops.push_back(j);
1639        }
1640      }
1641
1642      // Fold the load into the use if possible.
1643      bool Folded = false;
1644      if (CanFold && !Ops.empty()) {
1645        if (!vrm.isReMaterialized(VReg))
1646          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1647        else {
1648          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1649          int LdSlot = 0;
1650          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1651          // If the rematerializable def is a load, also try to fold it.
1652          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1653            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1654                                          Ops, isLoadSS, LdSlot, VReg);
1655          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1656          if (ImpUse) {
1657            // Re-matting an instruction with virtual register use. Add the
1658            // register as an implicit use on the use MI and update the register
1659            // interval's spill weight to HUGE_VALF to prevent it from being
1660            // spilled.
1661            LiveInterval &ImpLi = getInterval(ImpUse);
1662            ImpLi.weight = HUGE_VALF;
1663            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1664          }
1665        }
1666      }
1667      // If folding is not possible / failed, then tell the spiller to issue a
1668      // load / rematerialization for us.
1669      if (Folded)
1670        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1671      else
1672        vrm.addRestorePoint(VReg, MI);
1673    }
1674    Id = RestoreMBBs.find_next(Id);
1675  }
1676
1677  // Finalize intervals: add kills, finalize spill weights, and filter out
1678  // dead intervals.
1679  std::vector<LiveInterval*> RetNewLIs;
1680  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1681    LiveInterval *LI = NewLIs[i];
1682    if (!LI->empty()) {
1683      LI->weight /= LI->getSize();
1684      if (!AddedKill.count(LI)) {
1685        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1686        unsigned LastUseIdx = getBaseIndex(LR->end);
1687        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1688        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1689        assert(UseIdx != -1);
1690        if (LastUse->getOperand(UseIdx).isImplicit() ||
1691            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1692          LastUse->getOperand(UseIdx).setIsKill();
1693          vrm.addKillPoint(LI->reg, LastUseIdx);
1694        }
1695      }
1696      RetNewLIs.push_back(LI);
1697    }
1698  }
1699
1700  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1701  return RetNewLIs;
1702}
1703
1704/// hasAllocatableSuperReg - Return true if the specified physical register has
1705/// any super register that's allocatable.
1706bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1707  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1708    if (allocatableRegs_[*AS] && hasInterval(*AS))
1709      return true;
1710  return false;
1711}
1712
1713/// getRepresentativeReg - Find the largest super register of the specified
1714/// physical register.
1715unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1716  // Find the largest super-register that is allocatable.
1717  unsigned BestReg = Reg;
1718  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1719    unsigned SuperReg = *AS;
1720    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1721      BestReg = SuperReg;
1722      break;
1723    }
1724  }
1725  return BestReg;
1726}
1727
1728/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1729/// specified interval that conflicts with the specified physical register.
1730unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1731                                                   unsigned PhysReg) const {
1732  unsigned NumConflicts = 0;
1733  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1734  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1735         E = mri_->reg_end(); I != E; ++I) {
1736    MachineOperand &O = I.getOperand();
1737    MachineInstr *MI = O.getParent();
1738    unsigned Index = getInstructionIndex(MI);
1739    if (pli.liveAt(Index))
1740      ++NumConflicts;
1741  }
1742  return NumConflicts;
1743}
1744
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative super-register of PhysReg so the whole aliasing
  // family is freed up at once.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  // Track visited instructions: an instruction with several operands
  // referencing li.reg would otherwise be processed multiple times.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      // The physreg is live across this instruction: ask the spiller to
      // emit an emergency spill/restore around MI, then remove this
      // instruction's slot range from the physreg's interval and from any
      // sub-register intervals that are also live here.
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
1781