LiveIntervalAnalysis.cpp revision 70f2f65aacdbc96fe158b2860b5f0bad075ee548
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

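  // Each instruction is allotted InstrSlots::NUM consecutive indices, one
  // per slot -- LOAD, USE, DEF and STORE (see InstrSlots in
  // LiveIntervalAnalysis.h) -- so that distinct points within a single
  // instruction can be addressed; e.g. an instruction at base index 8 has
  // its use slot at getUseIndex(8) == 9 and its def slot at
  // getDefIndex(8) == 10.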
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = true;
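      // The reassignment above presumably only keeps 'inserted' referenced
      // when asserts are compiled out (NDEBUG builds).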
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
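          // A start index falling on the LOAD slot means the value was
          // live-in at a block boundary rather than defined by an
          // instruction; remap it to the start of the block that contained
          // the old index.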
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can also check uses.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
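    // A copy-like def (the subreg pseudo-instructions below or a target
    // move) is recorded on the value number; the register coalescer can
    // later inspect VNInfo::copy when deciding what to join.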
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->redefByEC = true;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = start + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

exit:
  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

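  // A def index of ~0U marks a value number with no defining instruction;
  // the register is simply live-in here.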
  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1, N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, in
/// which a variable is live.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs: build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
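  // Physical register intervals get an effectively infinite weight so the
  // spiller never picks them.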
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one virtual register
/// operand (for now we allow at most one), then its uses implicitly use that
/// register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the given operands of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Update the stack slot spill weight unless we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
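    // Note: getSpillWeight grows exponentially with loop depth (roughly
    // (def + use) * 10^loopDepth in this revision), so references inside
    // deep loops dominate the slot's weight.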
1309
1310    // Create a new virtual register for the spill interval.
1311    // Create the new register now so we can map the fold instruction
1312    // to the new register so when it is unfolded we get the correct
1313    // answer.
1314    bool CreatedNewVReg = false;
1315    if (NewVReg == 0) {
1316      NewVReg = mri_->createVirtualRegister(rc);
1317      vrm.grow();
1318      CreatedNewVReg = true;
1319    }
1320
1321    if (!TryFold)
1322      CanFold = false;
1323    else {
1324      // Do not fold load / store here if we are splitting. We'll find an
1325      // optimal point to insert a load / store later.
1326      if (!TrySplit) {
1327        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1328                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1329          // Folding the load/store can completely change the instruction in
1330          // unpredictable ways, rescan it from the beginning.
1331
1332          if (FoldSS) {
1333            // We need to give the new vreg the same stack slot as the
1334            // spilled interval.
1335            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1336          }
1337
1338          HasUse = false;
1339          HasDef = false;
1340          CanFold = false;
1341          if (isRemoved(MI)) {
1342            SSWeight -= Weight;
1343            break;
1344          }
1345          goto RestartInstruction;
1346        }
1347      } else {
1348        // We'll try to fold it later if it's profitable.
1349        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1350      }
1351    }
1352
1353    mop.setReg(NewVReg);
1354    if (mop.isImplicit())
1355      rewriteImplicitOps(li, MI, NewVReg, vrm);
1356
1357    // Reuse NewVReg for other reads.
1358    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1359      MachineOperand &mopj = MI->getOperand(Ops[j]);
1360      mopj.setReg(NewVReg);
1361      if (mopj.isImplicit())
1362        rewriteImplicitOps(li, MI, NewVReg, vrm);
1363    }
1364
1365    if (CreatedNewVReg) {
1366      if (DefIsReMat) {
1367        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1368        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1369          // Each valnum may have its own remat id.
1370          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1371        } else {
1372          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1373        }
1374        if (!CanDelete || (HasUse && HasDef)) {
1375          // If this is a two-addr instruction then its use operands are
1376          // rematerializable but its def is not. It should be assigned a
1377          // stack slot.
1378          vrm.assignVirt2StackSlot(NewVReg, Slot);
1379        }
1380      } else {
1381        vrm.assignVirt2StackSlot(NewVReg, Slot);
1382      }
1383    } else if (HasUse && HasDef &&
1384               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1385      // If this interval hasn't been assigned a stack slot (because earlier
1386      // def is a deleted remat def), do it now.
1387      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1388      vrm.assignVirt2StackSlot(NewVReg, Slot);
1389    }
1390
    // Re-matting an instruction that has a virtual register use. Add that
    // register as an implicit use on the using MI.
1393    if (DefIsReMat && ImpUse)
1394      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1395
    // Create a new register interval for this spill / remat.
1397    LiveInterval &nI = getOrCreateInterval(NewVReg);
1398    if (CreatedNewVReg) {
1399      NewLIs.push_back(&nI);
1400      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1401      if (TrySplit)
1402        vrm.setIsSplitFromReg(NewVReg, li.reg);
1403    }
1404
1405    if (HasUse) {
1406      if (CreatedNewVReg) {
1407        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1408                     nI.getNextValue(~0U, 0, VNInfoAllocator));
1409        DOUT << " +" << LR;
1410        nI.addRange(LR);
1411      } else {
1412        // Extend the split live interval to this def / use.
1413        unsigned End = getUseIndex(index)+1;
1414        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1415                     nI.getValNumInfo(nI.getNumValNums()-1));
1416        DOUT << " +" << LR;
1417        nI.addRange(LR);
1418      }
1419    }
1420    if (HasDef) {
1421      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1422                   nI.getNextValue(~0U, 0, VNInfoAllocator));
1423      DOUT << " +" << LR;
1424      nI.addRange(LR);
1425    }
1426
1427    DOUT << "\t\t\t\tAdded new interval: ";
1428    nI.print(DOUT, tri_);
1429    DOUT << '\n';
1430  }
1431  return CanFold;
1432}
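
/// anyKillInMBBAfterIdx - Return true if the given val# of the specified
/// live interval is killed inside MBB at an index strictly after Idx.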
1433bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1434                                   const VNInfo *VNI,
1435                                   MachineBasicBlock *MBB, unsigned Idx) const {
1436  unsigned End = getMBBEndIdx(MBB);
1437  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1438    unsigned KillIdx = VNI->kills[j];
1439    if (KillIdx > Idx && KillIdx < End)
1440      return true;
1441  }
1442  return false;
1443}
1444
1445/// RewriteInfo - Keep track of machine instrs that will be rewritten
1446/// during spilling.
1447namespace {
1448  struct RewriteInfo {
1449    unsigned Index;
1450    MachineInstr *MI;
1451    bool HasUse;
1452    bool HasDef;
1453    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1454      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1455  };
1456
1457  struct RewriteInfoCompare {
1458    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1459      return LHS.Index < RHS.Index;
1460    }
1461  };
1462}
1463
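/// rewriteInstructionsForSpills - Rewrite all defs and uses of the given
/// live interval that fall inside the live range *I, assigning them new
/// virtual registers and, when splitting, recording per-MBB spill / restore
/// candidates along the way.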
1464void LiveIntervals::
1465rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1466                    LiveInterval::Ranges::const_iterator &I,
1467                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1468                    unsigned Slot, int LdSlot,
1469                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1470                    VirtRegMap &vrm,
1471                    const TargetRegisterClass* rc,
1472                    SmallVector<int, 4> &ReMatIds,
1473                    const MachineLoopInfo *loopInfo,
1474                    BitVector &SpillMBBs,
1475                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1476                    BitVector &RestoreMBBs,
1477                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1478                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1479                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1480  bool AllCanFold = true;
1481  unsigned NewVReg = 0;
1482  unsigned start = getBaseIndex(I->start);
1483  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1484
  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
1487  std::vector<RewriteInfo> RewriteMIs;
1488  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1489         re = mri_->reg_end(); ri != re; ) {
1490    MachineInstr *MI = &*ri;
1491    MachineOperand &O = ri.getOperand();
1492    ++ri;
1493    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1494    unsigned index = getInstructionIndex(MI);
1495    if (index < start || index >= end)
1496      continue;
1497    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1498      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.:
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily end up in a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that overlap.
1507      continue;
1508    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1509  }
1510  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
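  // Sorting by index groups all entries for the same MachineInstr together;
  // the rewrite loop below relies on this adjacency when it coalesces the
  // multiple operands of one instruction.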
1511
1512  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1513  // Now rewrite the defs and uses.
1514  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1515    RewriteInfo &rwi = RewriteMIs[i];
1516    ++i;
1517    unsigned index = rwi.Index;
1518    bool MIHasUse = rwi.HasUse;
1519    bool MIHasDef = rwi.HasDef;
1520    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
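    // For example, "%reg1024 = ADD %reg1024, %reg1024" would contribute
    // three entries (one def, two uses) with the same index; the loop below
    // merges them so the instruction is rewritten only once.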
1523    unsigned NumUses = MIHasUse;
1524    while (i != e && RewriteMIs[i].MI == MI) {
1525      assert(RewriteMIs[i].Index == index);
1526      bool isUse = RewriteMIs[i].HasUse;
1527      if (isUse) ++NumUses;
1528      MIHasUse |= isUse;
1529      MIHasDef |= RewriteMIs[i].HasDef;
1530      ++i;
1531    }
1532    MachineBasicBlock *MBB = MI->getParent();
1533
1534    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction that has a virtual register use. Update the
      // used register interval's spill weight to HUGE_VALF to prevent it from
1537      // being spilled.
1538      LiveInterval &ImpLi = getInterval(ImpUse);
1539      ImpLi.weight = HUGE_VALF;
1540    }
1541
1542    unsigned MBBId = MBB->getNumber();
1543    unsigned ThisVReg = 0;
1544    if (TrySplit) {
1545      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1546      if (NVI != MBBVRegsMap.end()) {
1547        ThisVReg = NVI->second;
1548        // One common case:
1549        // x = use
1550        // ...
1551        // ...
1552        // def = ...
1553        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the old one.
1556        if (MIHasDef && !MIHasUse) {
1557          MBBVRegsMap.erase(MBB->getNumber());
1558          ThisVReg = 0;
1559        }
1560      }
1561    }
1562
1563    bool IsNew = ThisVReg == 0;
1564    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
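      // (A low weight makes the interval a preferred candidate should it
      // need to be spilled again, since all of its accesses can be folded
      // into memory operations.)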
1567      if (NewVReg && TrySplit && AllCanFold) {
1568        LiveInterval &nI = getOrCreateInterval(NewVReg);
1569        nI.weight /= 10.0F;
1570      }
1571      AllCanFold = true;
1572    }
1573    NewVReg = ThisVReg;
1574
1575    bool HasDef = false;
1576    bool HasUse = false;
1577    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1578                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1579                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1580                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1581                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1582    if (!HasDef && !HasUse)
1583      continue;
1584
1585    AllCanFold &= CanFold;
1586
1587    // Update weight of spill interval.
1588    LiveInterval &nI = getOrCreateInterval(NewVReg);
1589    if (!TrySplit) {
1590      // The spill weight is now infinity as it cannot be spilled again.
1591      nI.weight = HUGE_VALF;
1592      continue;
1593    }
1594
1595    // Keep track of the last def and first use in each MBB.
1596    if (HasDef) {
1597      if (MI != ReMatOrigDefMI || !CanDelete) {
1598        bool HasKill = false;
1599        if (!HasUse)
1600          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1601        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
1603          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1604          if (VNI)
1605            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1606        }
1607        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1608          SpillIdxes.find(MBBId);
1609        if (!HasKill) {
1610          if (SII == SpillIdxes.end()) {
1611            std::vector<SRInfo> S;
1612            S.push_back(SRInfo(index, NewVReg, true));
1613            SpillIdxes.insert(std::make_pair(MBBId, S));
1614          } else if (SII->second.back().vreg != NewVReg) {
1615            SII->second.push_back(SRInfo(index, NewVReg, true));
1616          } else if ((int)index > SII->second.back().index) {
1617            // If there is an earlier def and this is a two-address
1618            // instruction, then it's not possible to fold the store (which
1619            // would also fold the load).
1620            SRInfo &Info = SII->second.back();
1621            Info.index = index;
1622            Info.canFold = !HasUse;
1623          }
1624          SpillMBBs.set(MBBId);
1625        } else if (SII != SpillIdxes.end() &&
1626                   SII->second.back().vreg == NewVReg &&
1627                   (int)index > SII->second.back().index) {
1628          // There is an earlier def that's not killed (must be two-address).
1629          // The spill is no longer needed.
1630          SII->second.pop_back();
1631          if (SII->second.empty()) {
1632            SpillIdxes.erase(MBBId);
1633            SpillMBBs.reset(MBBId);
1634          }
1635        }
1636      }
1637    }
1638
1639    if (HasUse) {
1640      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1641        SpillIdxes.find(MBBId);
1642      if (SII != SpillIdxes.end() &&
1643          SII->second.back().vreg == NewVReg &&
1644          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
1646        SII->second.back().canFold = false;
1647      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1648        RestoreIdxes.find(MBBId);
1649      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1650        // If we are splitting live intervals, only fold if it's the first
1651        // use and there isn't another use later in the MBB.
1652        RII->second.back().canFold = false;
1653      else if (IsNew) {
1654        // Only need a reload if there isn't an earlier def / use.
1655        if (RII == RestoreIdxes.end()) {
1656          std::vector<SRInfo> Infos;
1657          Infos.push_back(SRInfo(index, NewVReg, true));
1658          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1659        } else {
1660          RII->second.push_back(SRInfo(index, NewVReg, true));
1661        }
1662        RestoreMBBs.set(MBBId);
1663      }
1664    }
1665
1666    // Update spill weight.
1667    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1668    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1669  }
1670
1671  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
1673    LiveInterval &nI = getOrCreateInterval(NewVReg);
1674    nI.weight /= 10.0F;
1675  }
1676}
1677
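/// alsoFoldARestore - Return true if a foldable restore of register vr is
/// recorded at the given instruction index in basic block Id.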
1678bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1679                        BitVector &RestoreMBBs,
1680                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1681  if (!RestoreMBBs[Id])
1682    return false;
1683  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1684  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1685    if (Restores[i].index == index &&
1686        Restores[i].vreg == vr &&
1687        Restores[i].canFold)
1688      return true;
1689  return false;
1690}
1691
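/// eraseRestoreInfo - Invalidate any restore of register vr recorded at the
/// given instruction index in basic block Id, e.g. because the load has
/// already been folded.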
1692void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1693                        BitVector &RestoreMBBs,
1694                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1695  if (!RestoreMBBs[Id])
1696    return;
1697  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1698  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
1700      Restores[i].index = -1;
1701}
1702
1703/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1704/// spilled and create empty intervals for their uses.
1705void
1706LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1707                                    const TargetRegisterClass* rc,
1708                                    std::vector<LiveInterval*> &NewLIs) {
1709  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1710         re = mri_->reg_end(); ri != re; ) {
1711    MachineOperand &O = ri.getOperand();
1712    MachineInstr *MI = &*ri;
1713    ++ri;
1714    if (O.isDef()) {
1715      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1716             "Register def was not rewritten?");
1717      RemoveMachineInstrFromMaps(MI);
1718      vrm.RemoveMachineInstrFromMaps(MI);
1719      MI->eraseFromParent();
1720    } else {
      // This must be a use of an implicit_def so it's not part of the live
1722      // interval. Create a new empty live interval for it.
1723      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1724      unsigned NewVReg = mri_->createVirtualRegister(rc);
1725      vrm.grow();
1726      vrm.setIsImplicitlyDefined(NewVReg);
1727      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1728      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1729        MachineOperand &MO = MI->getOperand(i);
1730        if (MO.isReg() && MO.getReg() == li.reg)
1731          MO.setReg(NewVReg);
1732      }
1733    }
1734  }
1735}
1736
1737namespace {
1738  struct LISorter {
1739    bool operator()(LiveInterval* A, LiveInterval* B) {
1740      return A->beginNumber() < B->beginNumber();
1741    }
1742  };
1743}
1744
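/// addIntervalsForSpillsFast - Quick-and-dirty spilling: give each
/// instruction that reads or writes li.reg its own new virtual register,
/// live only across that instruction, folding the stack slot access into
/// the instruction whenever possible.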
1745std::vector<LiveInterval*> LiveIntervals::
1746addIntervalsForSpillsFast(const LiveInterval &li,
1747                          const MachineLoopInfo *loopInfo,
1748                          VirtRegMap &vrm, float& SSWeight) {
1749  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1750
1751  std::vector<LiveInterval*> added;
1752
1753  assert(li.weight != HUGE_VALF &&
1754         "attempt to spill already spilled interval!");
1755
1756  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1757  DEBUG(li.dump());
1758  DOUT << '\n';
1759
1760  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1761
1762  SSWeight = 0.0f;
1763
1764  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1765  while (RI != mri_->reg_end()) {
1766    MachineInstr* MI = &*RI;
1767
1768    SmallVector<unsigned, 2> Indices;
1769    bool HasUse = false;
1770    bool HasDef = false;
1771
1772    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1773      MachineOperand& mop = MI->getOperand(i);
1774      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1775
1776      HasUse |= MI->getOperand(i).isUse();
1777      HasDef |= MI->getOperand(i).isDef();
1778
1779      Indices.push_back(i);
1780    }
1781
1782    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1783                              Indices, true, slot, li.reg)) {
1784      unsigned NewVReg = mri_->createVirtualRegister(rc);
1785      vrm.grow();
1786      vrm.assignVirt2StackSlot(NewVReg, slot);
1787
      // Create a new live interval for this spill.
1789      LiveInterval &nI = getOrCreateInterval(NewVReg);
1790
      // The spill weight is now infinity as it cannot be spilled again.
1793      nI.weight = HUGE_VALF;
1794
1795      // Rewrite register operands to use the new vreg.
1796      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1797           E = Indices.end(); I != E; ++I) {
1798        MI->getOperand(*I).setReg(NewVReg);
1799
1800        if (MI->getOperand(*I).isUse())
1801          MI->getOperand(*I).setIsKill(true);
1802      }
1803
      // Fill in the new live interval.
1805      unsigned index = getInstructionIndex(MI);
1806      if (HasUse) {
1807        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1808                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1809        DOUT << " +" << LR;
1810        nI.addRange(LR);
1811        vrm.addRestorePoint(NewVReg, MI);
1812      }
1813      if (HasDef) {
1814        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1815                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1816        DOUT << " +" << LR;
1817        nI.addRange(LR);
1818        vrm.addSpillPoint(NewVReg, true, MI);
1819      }
1820
1821      added.push_back(&nI);
1822
1823      DOUT << "\t\t\t\tadded new interval: ";
1824      DEBUG(nI.dump());
1825      DOUT << '\n';
1826
1827      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1828      if (HasUse) {
1829        if (HasDef)
1830          SSWeight += getSpillWeight(true, true, loopDepth);
1831        else
1832          SSWeight += getSpillWeight(false, true, loopDepth);
1833      } else
1834        SSWeight += getSpillWeight(true, false, loopDepth);
1835    }
1836
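    // Rewriting the operands to NewVReg (or folding MI away) removed MI
    // from the use list of li.reg, so restart the scan from the beginning.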
1838    RI = mri_->reg_begin(li.reg);
1839  }
1840
1841  // Clients expect the new intervals to be returned in sorted order.
1842  std::sort(added.begin(), added.end(), LISorter());
1843
1844  return added;
1845}
1846
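/// addIntervalsForSpills - Spill the given live interval, trying
/// rematerialization and load / store folding first and, when profitable,
/// splitting the interval at basic block boundaries.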
1847std::vector<LiveInterval*> LiveIntervals::
1848addIntervalsForSpills(const LiveInterval &li,
1849                      SmallVectorImpl<LiveInterval*> &SpillIs,
1850                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1851                      float &SSWeight) {
1852
1853  if (EnableFastSpilling)
1854    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1855
1856  assert(li.weight != HUGE_VALF &&
1857         "attempt to spill already spilled interval!");
1858
1859  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1860  li.print(DOUT, tri_);
1861  DOUT << '\n';
1862
1863  // Spill slot weight.
1864  SSWeight = 0.0f;
1865
  // Each bit specifies whether a spill is required in the corresponding MBB.
1867  BitVector SpillMBBs(mf_->getNumBlockIDs());
1868  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1869  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1870  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1871  DenseMap<unsigned,unsigned> MBBVRegsMap;
1872  std::vector<LiveInterval*> NewLIs;
1873  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1874
1875  unsigned NumValNums = li.getNumValNums();
1876  SmallVector<MachineInstr*, 4> ReMatDefs;
1877  ReMatDefs.resize(NumValNums, NULL);
1878  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1879  ReMatOrigDefs.resize(NumValNums, NULL);
1880  SmallVector<int, 4> ReMatIds;
1881  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1882  BitVector ReMatDelete(NumValNums);
1883  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1884
  // Spilling a split live interval. It cannot be split any further, and it's
  // guaranteed to be a single val# / range interval.
1887  if (vrm.getPreSplitReg(li.reg)) {
1888    vrm.setIsSplitFromReg(li.reg, 0);
1889    // Unset the split kill marker on the last use.
1890    unsigned KillIdx = vrm.getKillPoint(li.reg);
1891    if (KillIdx) {
1892      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1893      assert(KillMI && "Last use disappeared?");
1894      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1895      assert(KillOp != -1 && "Last use disappeared?");
1896      KillMI->getOperand(KillOp).setIsKill(false);
1897    }
1898    vrm.removeKillPoint(li.reg);
1899    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1900    Slot = vrm.getStackSlot(li.reg);
1901    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1902    MachineInstr *ReMatDefMI = DefIsReMat ?
1903      vrm.getReMaterializedMI(li.reg) : NULL;
1904    int LdSlot = 0;
1905    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1906    bool isLoad = isLoadSS ||
1907      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1908    bool IsFirstRange = true;
1909    for (LiveInterval::Ranges::const_iterator
1910           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1911      // If this is a split live interval with multiple ranges, it means there
1912      // are two-address instructions that re-defined the value. Only the
1913      // first def can be rematerialized!
1914      if (IsFirstRange) {
1915        // Note ReMatOrigDefMI has already been deleted.
1916        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1917                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1918                             false, vrm, rc, ReMatIds, loopInfo,
1919                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1920                             MBBVRegsMap, NewLIs, SSWeight);
1921      } else {
1922        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1923                             Slot, 0, false, false, false,
1924                             false, vrm, rc, ReMatIds, loopInfo,
1925                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1926                             MBBVRegsMap, NewLIs, SSWeight);
1927      }
1928      IsFirstRange = false;
1929    }
1930
1931    SSWeight = 0.0f;  // Already accounted for when split.
1932    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1933    return NewLIs;
1934  }
1935
1936  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1937  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1938    TrySplit = false;
1939  if (TrySplit)
1940    ++numSplits;
1941  bool NeedStackSlot = false;
1942  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1943       i != e; ++i) {
1944    const VNInfo *VNI = *i;
1945    unsigned VN = VNI->id;
1946    unsigned DefIdx = VNI->def;
1947    if (DefIdx == ~1U)
1948      continue; // Dead val#.
1949    // Is the def for the val# rematerializable?
1950    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1951      ? 0 : getInstructionFromIndex(DefIdx);
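    // (~0U marks a val# with no known defining instruction, so there is
    // nothing to rematerialize; ~1U, handled above, marks a dead val#.)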
1952    bool dummy;
1953    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1954      // Remember how to remat the def of this val#.
1955      ReMatOrigDefs[VN] = ReMatDefMI;
1956      // Original def may be modified so we have to make a copy here.
1957      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1958      ClonedMIs.push_back(Clone);
1959      ReMatDefs[VN] = Clone;
1960
1961      bool CanDelete = true;
1962      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // The def must not be deleted.
1965        CanDelete = false;
1966        // Need a stack slot if there is any live range where uses cannot be
1967        // rematerialized.
1968        NeedStackSlot = true;
1969      }
1970      if (CanDelete)
1971        ReMatDelete.set(VN);
1972    } else {
1973      // Need a stack slot if there is any live range where uses cannot be
1974      // rematerialized.
1975      NeedStackSlot = true;
1976    }
1977  }
1978
1979  // One stack slot per live interval.
1980  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1981    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1982      Slot = vrm.assignVirt2StackSlot(li.reg);
1983
1984    // This case only occurs when the prealloc splitter has already assigned
1985    // a stack slot to this vreg.
1986    else
1987      Slot = vrm.getStackSlot(li.reg);
1988  }
1989
1990  // Create new intervals and rewrite defs and uses.
1991  for (LiveInterval::Ranges::const_iterator
1992         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1993    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1994    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1995    bool DefIsReMat = ReMatDefMI != NULL;
1996    bool CanDelete = ReMatDelete[I->valno->id];
1997    int LdSlot = 0;
1998    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1999    bool isLoad = isLoadSS ||
2000      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2001    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2002                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2003                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2004                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2005                               MBBVRegsMap, NewLIs, SSWeight);
2006  }
2007
  // Insert spills / restores only if we are splitting; otherwise we are done.
2009  if (!TrySplit) {
2010    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2011    return NewLIs;
2012  }
2013
2014  SmallPtrSet<LiveInterval*, 4> AddedKill;
2015  SmallVector<unsigned, 2> Ops;
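  // For each block that needs a spill, try to fold the store (and any
  // foldable restore at the same index) directly into the instruction;
  // otherwise record a spill point for the spiller.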
2016  if (NeedStackSlot) {
2017    int Id = SpillMBBs.find_first();
2018    while (Id != -1) {
2019      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2020      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2021      std::vector<SRInfo> &spills = SpillIdxes[Id];
2022      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2023        int index = spills[i].index;
2024        unsigned VReg = spills[i].vreg;
2025        LiveInterval &nI = getOrCreateInterval(VReg);
2026        bool isReMat = vrm.isReMaterialized(VReg);
2027        MachineInstr *MI = getInstructionFromIndex(index);
2028        bool CanFold = false;
2029        bool FoundUse = false;
2030        Ops.clear();
2031        if (spills[i].canFold) {
2032          CanFold = true;
2033          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2034            MachineOperand &MO = MI->getOperand(j);
2035            if (!MO.isReg() || MO.getReg() != VReg)
2036              continue;
2037
2038            Ops.push_back(j);
2039            if (MO.isDef())
2040              continue;
2041            if (isReMat ||
2042                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2043                                                RestoreMBBs, RestoreIdxes))) {
2044              // MI has two-address uses of the same register. If the use
2045              // isn't the first and only use in the BB, then we can't fold
2046              // it. FIXME: Move this to rewriteInstructionsForSpills.
2047              CanFold = false;
2048              break;
2049            }
2050            FoundUse = true;
2051          }
2052        }
2053        // Fold the store into the def if possible.
2054        bool Folded = false;
2055        if (CanFold && !Ops.empty()) {
2056          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2057            Folded = true;
2058            if (FoundUse) {
              // Also folded uses; do not issue a load.
2060              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2061              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2062            }
2063            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2064          }
2065        }
2066
2067        // Otherwise tell the spiller to issue a spill.
2068        if (!Folded) {
2069          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2070          bool isKill = LR->end == getStoreIndex(index);
2071          if (!MI->registerDefIsDead(nI.reg))
2072            // No need to spill a dead def.
2073            vrm.addSpillPoint(VReg, isKill, MI);
2074          if (isKill)
2075            AddedKill.insert(&nI);
2076        }
2077
2078        // Update spill slot weight.
2079        if (!isReMat)
2080          SSWeight += getSpillWeight(true, false, loopDepth);
2081      }
2082      Id = SpillMBBs.find_next(Id);
2083    }
2084  }
2085
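  // Likewise for restores: try to fold the load (or rematerialize the
  // defining instruction) into the using instruction; otherwise record a
  // restore point for the spiller.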
2086  int Id = RestoreMBBs.find_first();
2087  while (Id != -1) {
2088    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2089    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2090
2091    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2092    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2093      int index = restores[i].index;
2094      if (index == -1)
2095        continue;
2096      unsigned VReg = restores[i].vreg;
2097      LiveInterval &nI = getOrCreateInterval(VReg);
2098      bool isReMat = vrm.isReMaterialized(VReg);
2099      MachineInstr *MI = getInstructionFromIndex(index);
2100      bool CanFold = false;
2101      Ops.clear();
2102      if (restores[i].canFold) {
2103        CanFold = true;
2104        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2105          MachineOperand &MO = MI->getOperand(j);
2106          if (!MO.isReg() || MO.getReg() != VReg)
2107            continue;
2108
2109          if (MO.isDef()) {
2110            // If this restore were to be folded, it would have been folded
2111            // already.
2112            CanFold = false;
2113            break;
2114          }
2115          Ops.push_back(j);
2116        }
2117      }
2118
2119      // Fold the load into the use if possible.
2120      bool Folded = false;
2121      if (CanFold && !Ops.empty()) {
2122        if (!isReMat)
2123          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2124        else {
2125          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2126          int LdSlot = 0;
2127          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2128          // If the rematerializable def is a load, also try to fold it.
2129          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2130            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2131                                          Ops, isLoadSS, LdSlot, VReg);
2132          if (!Folded) {
2133            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2134            if (ImpUse) {
2135              // Re-matting an instruction with virtual register use. Add the
2136              // register as an implicit use on the use MI and update the register
2137              // interval's spill weight to HUGE_VALF to prevent it from being
2138              // spilled.
2139              LiveInterval &ImpLi = getInterval(ImpUse);
2140              ImpLi.weight = HUGE_VALF;
2141              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2142            }
2143          }
2144        }
2145      }
2146      // If folding is not possible / failed, then tell the spiller to issue a
2147      // load / rematerialization for us.
2148      if (Folded)
2149        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2150      else
2151        vrm.addRestorePoint(VReg, MI);
2152
2153      // Update spill slot weight.
2154      if (!isReMat)
2155        SSWeight += getSpillWeight(false, true, loopDepth);
2156    }
2157    Id = RestoreMBBs.find_next(Id);
2158  }
2159
2160  // Finalize intervals: add kills, finalize spill weights, and filter out
2161  // dead intervals.
2162  std::vector<LiveInterval*> RetNewLIs;
2163  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2164    LiveInterval *LI = NewLIs[i];
2165    if (!LI->empty()) {
2166      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2167      if (!AddedKill.count(LI)) {
2168        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2169        unsigned LastUseIdx = getBaseIndex(LR->end);
2170        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2171        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2172        assert(UseIdx != -1);
2173        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2174          LastUse->getOperand(UseIdx).setIsKill();
2175          vrm.addKillPoint(LI->reg, LastUseIdx);
2176        }
2177      }
2178      RetNewLIs.push_back(LI);
2179    }
2180  }
2181
2182  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2183  return RetNewLIs;
2184}
2185
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable and has a live interval.
2188bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2189  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2190    if (allocatableRegs_[*AS] && hasInterval(*AS))
2191      return true;
2192  return false;
2193}
2194
2195/// getRepresentativeReg - Find the largest super register of the specified
2196/// physical register.
2197unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2198  // Find the largest super-register that is allocatable.
2199  unsigned BestReg = Reg;
2200  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2201    unsigned SuperReg = *AS;
2202    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2203      BestReg = SuperReg;
2204      break;
2205    }
2206  }
2207  return BestReg;
2208}
2209
2210/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2211/// specified interval that conflicts with the specified physical register.
2212unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2213                                                   unsigned PhysReg) const {
2214  unsigned NumConflicts = 0;
2215  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2216  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2217         E = mri_->reg_end(); I != E; ++I) {
2218    MachineOperand &O = I.getOperand();
2219    MachineInstr *MI = O.getParent();
2220    unsigned Index = getInstructionIndex(MI);
2221    if (pli.liveAt(Index))
2222      ++NumConflicts;
2223  }
2224  return NumConflicts;
2225}
2226
2227/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2228/// around all defs and uses of the specified interval. Return true if it
2229/// was able to cut its interval.
2230bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2231                                            unsigned PhysReg, VirtRegMap &vrm) {
2232  unsigned SpillReg = getRepresentativeReg(PhysReg);
2233
2234  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // Assert if there is a register which aliases PhysReg but is not a
    // sub-register of the chosen representative super register, since we
    // can't handle that case yet.
2238    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2239           tri_->isSuperRegister(*AS, SpillReg));
2240
2241  bool Cut = false;
2242  LiveInterval &pli = getInterval(SpillReg);
2243  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2244  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2245         E = mri_->reg_end(); I != E; ++I) {
2246    MachineOperand &O = I.getOperand();
2247    MachineInstr *MI = O.getParent();
2248    if (SeenMIs.count(MI))
2249      continue;
2250    SeenMIs.insert(MI);
2251    unsigned Index = getInstructionIndex(MI);
2252    if (pli.liveAt(Index)) {
2253      vrm.addEmergencySpill(SpillReg, MI);
2254      unsigned StartIdx = getLoadIndex(Index);
2255      unsigned EndIdx = getStoreIndex(Index)+1;
2256      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2257        pli.removeRange(StartIdx, EndIdx);
2258        Cut = true;
2259      } else {
2260        cerr << "Ran out of registers during register allocation!\n";
2261        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2262          cerr << "Please check your inline asm statement for invalid "
2263               << "constraints:\n";
2264          MI->print(cerr.stream(), tm_);
2265        }
2266        exit(1);
2267      }
2268      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2269        if (!hasInterval(*AS))
2270          continue;
2271        LiveInterval &spli = getInterval(*AS);
2272        if (spli.liveAt(Index))
2273          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2274      }
2275    }
2276  }
2277  return Cut;
2278}
2279
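/// addLiveRangeToEndOfBlock - Create a live range for reg from startInst's
/// def slot to the end of its basic block, marking the value as killed by a
/// PHI.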
2280LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2281                                                   MachineInstr* startInst) {
2282  LiveInterval& Interval = getOrCreateInterval(reg);
2283  VNInfo* VN = Interval.getNextValue(
2284            getInstructionIndex(startInst) + InstrSlots::DEF,
2285            startInst, getVNInfoAllocator());
2286  VN->hasPHIKill = true;
2287  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2288  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2289               getMBBEndIdx(startInst->getParent()) + 1, VN);
2290  Interval.addRange(LR);
2291
2292  return LR;
2293}
2294