LiveIntervalAnalysis.cpp revision 0a1fcce09230e9b4bd30a8f07447aa075dce7470
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

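/// computeNumbering - Assign an index to every instruction and basic block
/// in the function, then remap the ranges, value number defs and kills of
/// all existing live intervals from the old numbering to the new one.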
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
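  // Each instruction is assigned InstrSlots::NUM consecutive indices so the
  // LOAD, USE, DEF and STORE points of an instruction can be addressed
  // individually, and the empty slots inserted below leave room for
  // instructions (e.g. spill code) added later without a full renumbering.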
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
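  // Idx2MBBMap must stay sorted by start index; the std::lower_bound
  // searches below (and in findLiveInMBBs) depend on that ordering.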

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module*) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (int i = vi.AliveBlocks.find_first(); i != -1;
         i = vi.AliveBlocks.find_next(i)) {
      LiveRange LR(getMBBStartIdx(i),
                   getMBBEndIdx(i)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this
    // is the result of two-address elimination, then the vreg is one of
    // the def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // It cannot be an early clobber MO.
      assert(!MO.isEarlyClobber() && "Unexpected early clobber!");

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // It cannot be an early clobber MO.
      assert(!MO.isEarlyClobber() && "Unexpected early clobber!");

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = start + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // any other instruction where we know it's dead is if it is live-in to
  // the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->redefByEC = true;
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

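/// handleRegisterDef - Create or extend the live interval for the register
/// defined by the given operand, dispatching to the virtual or physical
/// register handler. A physical def also (implicitly) defines each of its
/// sub-registers.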
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, in
/// which the variable is live.
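/// (The interval is half-open: the variable is live at index i and at every
/// index up to, but not including, j.)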
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

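/// findLiveInMBBs - Collect the basic blocks whose start index falls within
/// [Start, End). Returns true if at least one block was found.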
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

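/// findReachableMBBs - Collect the successors of every basic block that lies
/// entirely within [Start, End). Returns true if at least one such block was
/// found.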
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

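/// createInterval - Allocate a new LiveInterval for the given register.
/// Physical registers get an infinite spill weight so the spiller never
/// picks them.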
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
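    // In release builds we can stop at the first register operand; in debug
    // builds keep scanning so the assert above can catch a second one.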
#ifdef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // we are folding a reload or a spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << *MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
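    // (getSpillWeight scales the def/use count by loop depth, roughly
    // (HasDef + HasUse) * 10^loopDepth, so accesses in deep loops dominate
    // the slot's weight.)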
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
1301
1302    // Create a new virtual register for the spill interval.
1303    // Create the new register now so we can map the fold instruction
1304    // to the new register so when it is unfolded we get the correct
1305    // answer.
1306    bool CreatedNewVReg = false;
1307    if (NewVReg == 0) {
1308      NewVReg = mri_->createVirtualRegister(rc);
1309      vrm.grow();
1310      CreatedNewVReg = true;
1311    }
1312
1313    if (!TryFold)
1314      CanFold = false;
1315    else {
1316      // Do not fold load / store here if we are splitting. We'll find an
1317      // optimal point to insert a load / store later.
1318      if (!TrySplit) {
1319        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1320                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1321          // Folding the load/store can completely change the instruction in
1322          // unpredictable ways, rescan it from the beginning.
1323
1324          if (FoldSS) {
1325            // We need to give the new vreg the same stack slot as the
1326            // spilled interval.
1327            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1328          }
1329
1330          HasUse = false;
1331          HasDef = false;
1332          CanFold = false;
1333          if (isRemoved(MI)) {
1334            SSWeight -= Weight;
1335            break;
1336          }
1337          goto RestartInstruction;
1338        }
1339      } else {
1340        // We'll try to fold it later if it's profitable.
1341        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1342      }
1343    }
1344
1345    mop.setReg(NewVReg);
1346    if (mop.isImplicit())
1347      rewriteImplicitOps(li, MI, NewVReg, vrm);
1348
1349    // Reuse NewVReg for other reads.
1350    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1351      MachineOperand &mopj = MI->getOperand(Ops[j]);
1352      mopj.setReg(NewVReg);
1353      if (mopj.isImplicit())
1354        rewriteImplicitOps(li, MI, NewVReg, vrm);
1355    }
1356
1357    if (CreatedNewVReg) {
1358      if (DefIsReMat) {
1359        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1360        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1361          // Each valnum may have its own remat id.
1362          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1363        } else {
1364          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1365        }
1366        if (!CanDelete || (HasUse && HasDef)) {
1367          // If this is a two-addr instruction then its use operands are
1368          // rematerializable but its def is not. It should be assigned a
1369          // stack slot.
1370          vrm.assignVirt2StackSlot(NewVReg, Slot);
1371        }
1372      } else {
1373        vrm.assignVirt2StackSlot(NewVReg, Slot);
1374      }
1375    } else if (HasUse && HasDef &&
1376               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1377      // If this interval hasn't been assigned a stack slot (because earlier
1378      // def is a deleted remat def), do it now.
1379      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1380      vrm.assignVirt2StackSlot(NewVReg, Slot);
1381    }
1382
1383    // Re-matting an instruction with virtual register use. Add the
1384    // register as an implicit use on the use MI.
1385    if (DefIsReMat && ImpUse)
1386      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1387
1388    // create a new register interval for this spill / remat.
1389    LiveInterval &nI = getOrCreateInterval(NewVReg);
1390    if (CreatedNewVReg) {
1391      NewLIs.push_back(&nI);
1392      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1393      if (TrySplit)
1394        vrm.setIsSplitFromReg(NewVReg, li.reg);
1395    }
1396
1397    if (HasUse) {
1398      if (CreatedNewVReg) {
1399        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1400                     nI.getNextValue(~0U, 0, VNInfoAllocator));
1401        DOUT << " +" << LR;
1402        nI.addRange(LR);
1403      } else {
1404        // Extend the split live interval to this def / use.
1405        unsigned End = getUseIndex(index)+1;
1406        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1407                     nI.getValNumInfo(nI.getNumValNums()-1));
1408        DOUT << " +" << LR;
1409        nI.addRange(LR);
1410      }
1411    }
1412    if (HasDef) {
1413      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1414                   nI.getNextValue(~0U, 0, VNInfoAllocator));
1415      DOUT << " +" << LR;
1416      nI.addRange(LR);
1417    }
1418
1419    DOUT << "\t\t\t\tAdded new interval: ";
1420    nI.print(DOUT, tri_);
1421    DOUT << '\n';
1422  }
1423  return CanFold;
1424}
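
/// anyKillInMBBAfterIdx - Return true if the specified val# of the given
/// interval has a kill inside MBB after the given instruction index.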
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

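/// rewriteInstructionsForSpills - Rewrite all defs / uses of the given live
/// range. For each instruction it decides whether the operand can be folded
/// into a load / store, records per-MBB spill and restore points, and (when
/// splitting) starts new intervals at basic block boundaries.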
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
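  // Note the collection loop above added one RewriteInfo per operand, so an
  // instruction that both defs and uses li.reg (e.g. a two-address
  // %reg1024<def> = ADD %reg1024, %reg1025) contributes several entries with
  // the same index; the rewrite loop below merges such runs.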

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

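/// alsoFoldARestore - Return true if the restore recorded for virtual
/// register vr at the given index in block Id is still marked foldable.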
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

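/// eraseRestoreInfo - Invalidate the restore recorded for virtual register
/// vr at the given index in block Id, so no reload is issued for it.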
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}

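// LISorter - Order live intervals by ascending start index so callers get
// the new intervals back in sorted order.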
namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}

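/// addIntervalsForSpillsFast - Spill the given interval without splitting:
/// each instruction whose memory operand cannot be trivially folded gets its
/// own new virtual register plus a spill and/or restore point around it.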
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  SSWeight = 0.0f;

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new live interval for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';

      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse) {
        if (HasDef)
          SSWeight += getSpillWeight(true, true, loopDepth);
        else
          SSWeight += getSpillWeight(false, true, loopDepth);
      } else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    // MI no longer refers to li.reg (it was either folded or rewritten to
    // use NewVReg), so restart from the head of the use list.
    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}

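/// addIntervalsForSpills - Spill the given interval, possibly splitting it
/// at basic block boundaries first. Returns the set of new live intervals
/// created for the spill / remat, and accumulates the spill slot weight
/// into SSWeight.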
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It's
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f;  // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
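  // First pass over the blocks that need spills: try to fold each recorded
  // store into the def instruction; if that fails, register a spill point
  // with the spiller.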
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

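  // Second pass over the blocks that need restores: try to fold each reload
  // (or rematerialized load) into the use instruction; otherwise tell the
  // spiller to issue a load / rematerialization.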
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but which are not a
    // sub-register of the chosen representative super register, assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx))
        pli.removeRange(StartIdx, EndIdx);
      else {
        cerr << "Ran out of registers during register allocation!\n";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          cerr << "Please check your inline asm statement for invalid "
               << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}

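/// addLiveRangeToEndOfBlock - Given a register defined by startInst, create
/// a new live range (and value number) covering from the def slot of
/// startInst to the end of its basic block, marked as having a PHI kill.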
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}
