LiveIntervalAnalysis.cpp revision d21c31610c4a10e9e638f4221b5df89ea932e258
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
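// All of the above are cl::Hidden, so they only appear under -help-hidden.
// An illustrative debugging invocation (hypothetical input file) might be:
//   llc -disable-rematerialization -split-intervals-at-bb=false foo.bc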

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

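// Instruction numbering: computeNumbering gives every MachineInstr a block
// of InstrSlots::NUM consecutive indices.  An illustrative sketch, assuming
// the InstrSlots layout declared in LiveIntervalAnalysis.h
// (LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4):
//
//   base + 0   load slot   (reloads inserted before the instruction)
//   base + 1   use slot    (operands are read)
//   base + 2   def slot    (results are written)
//   base + 3   store slot  (spills inserted after the instruction)
//
// The empty entries pushed below reserve index space for instructions that
// are created later, e.g. by spilling or splitting.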
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      (void)inserted; // Silence unused-variable warnings in NDEBUG builds.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
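        // For example (illustrative): a start index of 26 (base 24, DEF
        // offset 2) becomes 18 when the instruction that was numbered 24 is
        // renumbered to base 16, via mi2iMap_[OldI2MI[26 / NUM]] + 26 % NUM.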
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = MI->first;
  }
}
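
// A hypothetical caller that needs room for instructions inserted between
// existing ones could stretch the numbering, e.g. lis->scaleNumbering(2):
// MBB ranges, live ranges, VNInfo defs/kills and instruction slots all move
// together, so relative order is preserved.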

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it checks uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, is potentially live across some number of
    // blocks, and is then killed.  Start by adding a range that goes from
    // this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(Start, 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
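  // (Illustratively, with the assumed 4-slot layout an instruction at base
  // index 100 gets the one-slot dead range [102, 103).)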
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills. If the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni = interval.getNextValue(start, 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and
/// the variable is live.
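///
/// An illustrative example, assuming the 4-slot numbering scheme described
/// above computeNumbering and hypothetical x86 instructions:
///
///   8   %reg1024 = MOV32ri 42                  ; def slot = 8 + 2 = 10
///   12  %reg1025 = ADD32rr %reg1023, %reg1024  ; use slot = 12 + 1 = 13
///
/// gives %reg1024 the live interval [10, 14), i.e. from its def slot to one
/// past the use slot of its killing instruction.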
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
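  // Physical register intervals get an infinite spill weight so the
  // allocator never tries to spill them; virtual register intervals start
  // with zero weight.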
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // The assert above is compiled out in release builds, so stop at the
    // first match; in debug builds keep scanning so the assert can catch a
    // second register operand.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // folding a reload or spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be-rematerialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      HasUse = false;

    // Create a new virtual register for the spill interval.  Create it now
    // so we can map the fold instruction to the new register; then, when
    // the folded instruction is unfolded, we get the correct answer.
1365    bool CreatedNewVReg = false;
1366    if (NewVReg == 0) {
1367      NewVReg = mri_->createVirtualRegister(rc);
1368      vrm.grow();
1369      CreatedNewVReg = true;
1370    }
1371
1372    if (!TryFold)
1373      CanFold = false;
1374    else {
1375      // Do not fold load / store here if we are splitting. We'll find an
1376      // optimal point to insert a load / store later.
1377      if (!TrySplit) {
1378        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1379                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1380          // Folding the load/store can completely change the instruction in
1381          // unpredictable ways, rescan it from the beginning.
1382
1383          if (FoldSS) {
1384            // We need to give the new vreg the same stack slot as the
1385            // spilled interval.
1386            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1387          }
1388
1389          HasUse = false;
1390          HasDef = false;
1391          CanFold = false;
1392          if (isNotInMIMap(MI))
1393            break;
1394          goto RestartInstruction;
1395        }
1396      } else {
1397        // We'll try to fold it later if it's profitable.
1398        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1399      }
1400    }
1401
1402    mop.setReg(NewVReg);
1403    if (mop.isImplicit())
1404      rewriteImplicitOps(li, MI, NewVReg, vrm);
1405
1406    // Reuse NewVReg for other reads.
1407    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1408      MachineOperand &mopj = MI->getOperand(Ops[j]);
1409      mopj.setReg(NewVReg);
1410      if (mopj.isImplicit())
1411        rewriteImplicitOps(li, MI, NewVReg, vrm);
1412    }
1413
1414    if (CreatedNewVReg) {
1415      if (DefIsReMat) {
1416        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1417        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1418          // Each valnum may have its own remat id.
1419          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1420        } else {
1421          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1422        }
1423        if (!CanDelete || (HasUse && HasDef)) {
1424          // If this is a two-addr instruction then its use operands are
1425          // rematerializable but its def is not. It should be assigned a
1426          // stack slot.
1427          vrm.assignVirt2StackSlot(NewVReg, Slot);
1428        }
1429      } else {
1430        vrm.assignVirt2StackSlot(NewVReg, Slot);
1431      }
1432    } else if (HasUse && HasDef &&
1433               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because an
      // earlier def is a deleted remat def), do it now.
1436      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1437      vrm.assignVirt2StackSlot(NewVReg, Slot);
1438    }
1439
1440    // Re-matting an instruction with virtual register use. Add the
1441    // register as an implicit use on the use MI.
1442    if (DefIsReMat && ImpUse)
1443      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1444
1445    // Create a new register interval for this spill / remat.
1446    LiveInterval &nI = getOrCreateInterval(NewVReg);
1447    if (CreatedNewVReg) {
1448      NewLIs.push_back(&nI);
1449      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1450      if (TrySplit)
1451        vrm.setIsSplitFromReg(NewVReg, li.reg);
1452    }
1453
1454    if (HasUse) {
1455      if (CreatedNewVReg) {
1456        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1457                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1458        DOUT << " +" << LR;
1459        nI.addRange(LR);
1460      } else {
1461        // Extend the split live interval to this def / use.
1462        unsigned End = getUseIndex(index)+1;
1463        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1464                     nI.getValNumInfo(nI.getNumValNums()-1));
1465        DOUT << " +" << LR;
1466        nI.addRange(LR);
1467      }
1468    }
1469    if (HasDef) {
1470      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1471                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1472      DOUT << " +" << LR;
1473      nI.addRange(LR);
1474    }
1475
1476    DOUT << "\t\t\t\tAdded new interval: ";
1477    nI.print(DOUT, tri_);
1478    DOUT << '\n';
1479  }
1480  return CanFold;
1481}
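
/// anyKillInMBBAfterIdx - Return true if the given val# is killed in MBB at
/// an index after Idx but before the end of the block.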
1482bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1483                                   const VNInfo *VNI,
1484                                   MachineBasicBlock *MBB, unsigned Idx) const {
1485  unsigned End = getMBBEndIdx(MBB);
1486  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1487    unsigned KillIdx = VNI->kills[j];
1488    if (KillIdx > Idx && KillIdx < End)
1489      return true;
1490  }
1491  return false;
1492}
1493
1494/// RewriteInfo - Keep track of machine instrs that will be rewritten
1495/// during spilling.
1496namespace {
1497  struct RewriteInfo {
1498    unsigned Index;
1499    MachineInstr *MI;
1500    bool HasUse;
1501    bool HasDef;
1502    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1503      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1504  };
1505
1506  struct RewriteInfoCompare {
1507    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1508      return LHS.Index < RHS.Index;
1509    }
1510  };
1511}
1512
1513void LiveIntervals::
1514rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1515                    LiveInterval::Ranges::const_iterator &I,
1516                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1517                    unsigned Slot, int LdSlot,
1518                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1519                    VirtRegMap &vrm,
1520                    const TargetRegisterClass* rc,
1521                    SmallVector<int, 4> &ReMatIds,
1522                    const MachineLoopInfo *loopInfo,
1523                    BitVector &SpillMBBs,
1524                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1525                    BitVector &RestoreMBBs,
1526                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1527                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1528                    std::vector<LiveInterval*> &NewLIs) {
1529  bool AllCanFold = true;
1530  unsigned NewVReg = 0;
1531  unsigned start = getBaseIndex(I->start);
1532  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
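  // Each instruction owns InstrSlots::NUM consecutive index slots, so
  // rounding I->start down to its base index and rounding I->end-1 down,
  // then adding InstrSlots::NUM, covers the full slot group of every
  // instruction that overlaps this range. E.g., assuming InstrSlots::NUM is
  // 4, a range [14, 27) widens to [12, 28).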
1533
  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
1536  std::vector<RewriteInfo> RewriteMIs;
1537  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1538         re = mri_->reg_end(); ri != re; ) {
1539    MachineInstr *MI = &*ri;
1540    MachineOperand &O = ri.getOperand();
1541    ++ri;
1542    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1543    unsigned index = getInstructionIndex(MI);
1544    if (index < start || index >= end)
1545      continue;
1546    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It does not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: both could
      // easily be reloaded before the INSERT_SUBREG and be assigned target
      // registers that overlap.
1556      continue;
1557    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1558  }
1559  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
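  // Visiting in increasing index order guarantees that defs and uses within
  // each MBB are processed in program order; the SpillIdxes / RestoreIdxes
  // bookkeeping below compares each new index against the last one recorded
  // and relies on this ordering.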
1560
1561  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1562  // Now rewrite the defs and uses.
1563  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1564    RewriteInfo &rwi = RewriteMIs[i];
1565    ++i;
1566    unsigned index = rwi.Index;
1567    bool MIHasUse = rwi.HasUse;
1568    bool MIHasDef = rwi.HasDef;
1569    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
1572    unsigned NumUses = MIHasUse;
1573    while (i != e && RewriteMIs[i].MI == MI) {
1574      assert(RewriteMIs[i].Index == index);
1575      bool isUse = RewriteMIs[i].HasUse;
1576      if (isUse) ++NumUses;
1577      MIHasUse |= isUse;
1578      MIHasDef |= RewriteMIs[i].HasDef;
1579      ++i;
1580    }
1581    MachineBasicBlock *MBB = MI->getParent();
1582
1583    if (ImpUse && MI != ReMatDefMI) {
1584      // Re-matting an instruction with virtual register use. Update the
1585      // register interval's spill weight to HUGE_VALF to prevent it from
1586      // being spilled.
1587      LiveInterval &ImpLi = getInterval(ImpUse);
1588      ImpLi.weight = HUGE_VALF;
1589    }
1590
1591    unsigned MBBId = MBB->getNumber();
1592    unsigned ThisVReg = 0;
1593    if (TrySplit) {
1594      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1595      if (NVI != MBBVRegsMap.end()) {
1596        ThisVReg = NVI->second;
1597        // One common case:
1598        // x = use
1599        // ...
1600        // ...
1601        // def = ...
1602        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the previous interval.
1605        if (MIHasDef && !MIHasUse) {
1606          MBBVRegsMap.erase(MBB->getNumber());
1607          ThisVReg = 0;
1608        }
1609      }
1610    }
1611
1612    bool IsNew = ThisVReg == 0;
1613    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
1616      if (NewVReg && TrySplit && AllCanFold) {
1617        LiveInterval &nI = getOrCreateInterval(NewVReg);
1618        nI.weight /= 10.0F;
1619      }
1620      AllCanFold = true;
1621    }
1622    NewVReg = ThisVReg;
1623
1624    bool HasDef = false;
1625    bool HasUse = false;
1626    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1627                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1628                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1629                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1630                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1631    if (!HasDef && !HasUse)
1632      continue;
1633
1634    AllCanFold &= CanFold;
1635
1636    // Update weight of spill interval.
1637    LiveInterval &nI = getOrCreateInterval(NewVReg);
1638    if (!TrySplit) {
1639      // The spill weight is now infinity as it cannot be spilled again.
1640      nI.weight = HUGE_VALF;
1641      continue;
1642    }
1643
1644    // Keep track of the last def and first use in each MBB.
1645    if (HasDef) {
1646      if (MI != ReMatOrigDefMI || !CanDelete) {
1647        bool HasKill = false;
1648        if (!HasUse)
1649          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1650        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
1652          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1653          if (VNI)
1654            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1655        }
1656        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1657          SpillIdxes.find(MBBId);
1658        if (!HasKill) {
1659          if (SII == SpillIdxes.end()) {
1660            std::vector<SRInfo> S;
1661            S.push_back(SRInfo(index, NewVReg, true));
1662            SpillIdxes.insert(std::make_pair(MBBId, S));
1663          } else if (SII->second.back().vreg != NewVReg) {
1664            SII->second.push_back(SRInfo(index, NewVReg, true));
1665          } else if ((int)index > SII->second.back().index) {
1666            // If there is an earlier def and this is a two-address
1667            // instruction, then it's not possible to fold the store (which
1668            // would also fold the load).
1669            SRInfo &Info = SII->second.back();
1670            Info.index = index;
1671            Info.canFold = !HasUse;
1672          }
1673          SpillMBBs.set(MBBId);
1674        } else if (SII != SpillIdxes.end() &&
1675                   SII->second.back().vreg == NewVReg &&
1676                   (int)index > SII->second.back().index) {
1677          // There is an earlier def that's not killed (must be two-address).
1678          // The spill is no longer needed.
1679          SII->second.pop_back();
1680          if (SII->second.empty()) {
1681            SpillIdxes.erase(MBBId);
1682            SpillMBBs.reset(MBBId);
1683          }
1684        }
1685      }
1686    }
1687
1688    if (HasUse) {
1689      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1690        SpillIdxes.find(MBBId);
1691      if (SII != SpillIdxes.end() &&
1692          SII->second.back().vreg == NewVReg &&
1693          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold the
        // spill.
1695        SII->second.back().canFold = false;
1696      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1697        RestoreIdxes.find(MBBId);
1698      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1699        // If we are splitting live intervals, only fold if it's the first
1700        // use and there isn't another use later in the MBB.
1701        RII->second.back().canFold = false;
1702      else if (IsNew) {
1703        // Only need a reload if there isn't an earlier def / use.
1704        if (RII == RestoreIdxes.end()) {
1705          std::vector<SRInfo> Infos;
1706          Infos.push_back(SRInfo(index, NewVReg, true));
1707          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1708        } else {
1709          RII->second.push_back(SRInfo(index, NewVReg, true));
1710        }
1711        RestoreMBBs.set(MBBId);
1712      }
1713    }
1714
1715    // Update spill weight.
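    // getSpillWeight scales the increment with the loop depth of the MBB, so
    // defs / uses inside deep loop nests make this interval far more
    // expensive to spill again.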
1716    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1717    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1718  }
1719
1720  if (NewVReg && TrySplit && AllCanFold) {
1721    // If all of its def / use can be folded, give it a low spill weight.
1722    LiveInterval &nI = getOrCreateInterval(NewVReg);
1723    nI.weight /= 10.0F;
1724  }
1725}
1726
1727bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1728                        BitVector &RestoreMBBs,
1729                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1730  if (!RestoreMBBs[Id])
1731    return false;
1732  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1733  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1734    if (Restores[i].index == index &&
1735        Restores[i].vreg == vr &&
1736        Restores[i].canFold)
1737      return true;
1738  return false;
1739}
1740
1741void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1742                        BitVector &RestoreMBBs,
1743                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1744  if (!RestoreMBBs[Id])
1745    return;
1746  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1747  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1748    if (Restores[i].index == index && Restores[i].vreg)
1749      Restores[i].index = -1;
1750}
1751
1752/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1753/// spilled and create empty intervals for their uses.
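/// For example (illustrative only), given
///   %reg1024<def> = IMPLICIT_DEF
///   ...           = SOMEOP %reg1024
/// the IMPLICIT_DEF is erased and the use is rewritten to a fresh vreg with
/// an empty interval, leaving the allocator free to assign it any register.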
1754void
1755LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1756                                    const TargetRegisterClass* rc,
1757                                    std::vector<LiveInterval*> &NewLIs) {
1758  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1759         re = mri_->reg_end(); ri != re; ) {
1760    MachineOperand &O = ri.getOperand();
1761    MachineInstr *MI = &*ri;
1762    ++ri;
1763    if (O.isDef()) {
1764      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1765             "Register def was not rewritten?");
1766      RemoveMachineInstrFromMaps(MI);
1767      vrm.RemoveMachineInstrFromMaps(MI);
1768      MI->eraseFromParent();
1769    } else {
      // This must be a use of an implicit_def, so it's not part of the live
      // interval. Create a new empty live interval for it.
1772      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1773      unsigned NewVReg = mri_->createVirtualRegister(rc);
1774      vrm.grow();
1775      vrm.setIsImplicitlyDefined(NewVReg);
1776      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1777      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1778        MachineOperand &MO = MI->getOperand(i);
1779        if (MO.isReg() && MO.getReg() == li.reg)
1780          MO.setReg(NewVReg);
1781      }
1782    }
1783  }
1784}
1785
1786std::vector<LiveInterval*> LiveIntervals::
1787addIntervalsForSpillsFast(const LiveInterval &li,
1788                          const MachineLoopInfo *loopInfo,
1789                          VirtRegMap &vrm) {
1790  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1791
1792  std::vector<LiveInterval*> added;
1793
1794  assert(li.weight != HUGE_VALF &&
1795         "attempt to spill already spilled interval!");
1796
1797  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1798  DEBUG(li.dump());
1799  DOUT << '\n';
1800
1801  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1802
1803  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1804  while (RI != mri_->reg_end()) {
1805    MachineInstr* MI = &*RI;
1806
1807    SmallVector<unsigned, 2> Indices;
1808    bool HasUse = false;
1809    bool HasDef = false;
1810
1811    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1812      MachineOperand& mop = MI->getOperand(i);
1813      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1814
1815      HasUse |= MI->getOperand(i).isUse();
1816      HasDef |= MI->getOperand(i).isDef();
1817
1818      Indices.push_back(i);
1819    }
1820
1821    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1822                              Indices, true, slot, li.reg)) {
1823      unsigned NewVReg = mri_->createVirtualRegister(rc);
1824      vrm.grow();
1825      vrm.assignVirt2StackSlot(NewVReg, slot);
1826
      // Create a new register for this spill.
1828      LiveInterval &nI = getOrCreateInterval(NewVReg);
1829
      // The spill weight is now infinity, as it cannot be spilled again.
1832      nI.weight = HUGE_VALF;
1833
1834      // Rewrite register operands to use the new vreg.
1835      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1836           E = Indices.end(); I != E; ++I) {
1837        MI->getOperand(*I).setReg(NewVReg);
1838
1839        if (MI->getOperand(*I).isUse())
1840          MI->getOperand(*I).setIsKill(true);
1841      }
1842
      // Fill in the new live interval.
1844      unsigned index = getInstructionIndex(MI);
1845      if (HasUse) {
1846        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1847                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1848        DOUT << " +" << LR;
1849        nI.addRange(LR);
1850        vrm.addRestorePoint(NewVReg, MI);
1851      }
1852      if (HasDef) {
1853        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1854                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1855        DOUT << " +" << LR;
1856        nI.addRange(LR);
1857        vrm.addSpillPoint(NewVReg, true, MI);
1858      }
1859
1860      added.push_back(&nI);
1861
1862      DOUT << "\t\t\t\tadded new interval: ";
1863      DEBUG(nI.dump());
1864      DOUT << '\n';
1865    }
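    // MI no longer references li.reg: either its operands were rewritten to
    // NewVReg above or the instruction was folded away. Restart from the
    // head of the use list to find the next instruction still using li.reg.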
1868    RI = mri_->reg_begin(li.reg);
1869  }
1870
1871  return added;
1872}
1873
1874std::vector<LiveInterval*> LiveIntervals::
1875addIntervalsForSpills(const LiveInterval &li,
1876                      SmallVectorImpl<LiveInterval*> &SpillIs,
1877                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1878
1879  if (EnableFastSpilling)
1880    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1881
1882  assert(li.weight != HUGE_VALF &&
1883         "attempt to spill already spilled interval!");
1884
1885  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1886  li.print(DOUT, tri_);
1887  DOUT << '\n';
1888
  // Each bit specifies whether a spill is required in the MBB.
1890  BitVector SpillMBBs(mf_->getNumBlockIDs());
1891  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1892  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1893  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1894  DenseMap<unsigned,unsigned> MBBVRegsMap;
1895  std::vector<LiveInterval*> NewLIs;
1896  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1897
1898  unsigned NumValNums = li.getNumValNums();
1899  SmallVector<MachineInstr*, 4> ReMatDefs;
1900  ReMatDefs.resize(NumValNums, NULL);
1901  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1902  ReMatOrigDefs.resize(NumValNums, NULL);
1903  SmallVector<int, 4> ReMatIds;
1904  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
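  // MAX_STACK_SLOT doubles as the "no remat id assigned yet" sentinel; see
  // the corresponding check in rewriteInstructionForSpills.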
1905  BitVector ReMatDelete(NumValNums);
1906  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1907
  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
1910  if (vrm.getPreSplitReg(li.reg)) {
1911    vrm.setIsSplitFromReg(li.reg, 0);
1912    // Unset the split kill marker on the last use.
1913    unsigned KillIdx = vrm.getKillPoint(li.reg);
1914    if (KillIdx) {
1915      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1916      assert(KillMI && "Last use disappeared?");
1917      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1918      assert(KillOp != -1 && "Last use disappeared?");
1919      KillMI->getOperand(KillOp).setIsKill(false);
1920    }
1921    vrm.removeKillPoint(li.reg);
1922    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1923    Slot = vrm.getStackSlot(li.reg);
1924    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1925    MachineInstr *ReMatDefMI = DefIsReMat ?
1926      vrm.getReMaterializedMI(li.reg) : NULL;
1927    int LdSlot = 0;
1928    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1929    bool isLoad = isLoadSS ||
1930      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1931    bool IsFirstRange = true;
1932    for (LiveInterval::Ranges::const_iterator
1933           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1934      // If this is a split live interval with multiple ranges, it means there
1935      // are two-address instructions that re-defined the value. Only the
1936      // first def can be rematerialized!
1937      if (IsFirstRange) {
1938        // Note ReMatOrigDefMI has already been deleted.
1939        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1940                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1941                             false, vrm, rc, ReMatIds, loopInfo,
1942                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1943                             MBBVRegsMap, NewLIs);
1944      } else {
1945        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1946                             Slot, 0, false, false, false,
1947                             false, vrm, rc, ReMatIds, loopInfo,
1948                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1949                             MBBVRegsMap, NewLIs);
1950      }
1951      IsFirstRange = false;
1952    }
1953
1954    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1955    return NewLIs;
1956  }
1957
1958  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1959  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1960    TrySplit = false;
1961  if (TrySplit)
1962    ++numSplits;
1963  bool NeedStackSlot = false;
1964  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1965       i != e; ++i) {
1966    const VNInfo *VNI = *i;
1967    unsigned VN = VNI->id;
1968    if (VNI->isUnused())
1969      continue; // Dead val#.
1970    // Is the def for the val# rematerializable?
1971    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1972      ? getInstructionFromIndex(VNI->def) : 0;
1973    bool dummy;
1974    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1975      // Remember how to remat the def of this val#.
1976      ReMatOrigDefs[VN] = ReMatDefMI;
1977      // Original def may be modified so we have to make a copy here.
1978      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1979      ClonedMIs.push_back(Clone);
1980      ReMatDefs[VN] = Clone;
1981
1982      bool CanDelete = true;
1983      if (VNI->hasPHIKill()) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
        // It must not be deleted.
1986        CanDelete = false;
1987        // Need a stack slot if there is any live range where uses cannot be
1988        // rematerialized.
1989        NeedStackSlot = true;
1990      }
1991      if (CanDelete)
1992        ReMatDelete.set(VN);
1993    } else {
1994      // Need a stack slot if there is any live range where uses cannot be
1995      // rematerialized.
1996      NeedStackSlot = true;
1997    }
1998  }
1999
2000  // One stack slot per live interval.
2001  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already
      // assigned a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
2009  }
2010
2011  // Create new intervals and rewrite defs and uses.
2012  for (LiveInterval::Ranges::const_iterator
2013         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2014    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2015    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2016    bool DefIsReMat = ReMatDefMI != NULL;
2017    bool CanDelete = ReMatDelete[I->valno->id];
2018    int LdSlot = 0;
2019    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2020    bool isLoad = isLoadSS ||
2021      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2022    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2023                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2024                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2025                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2026                               MBBVRegsMap, NewLIs);
2027  }
2028
2029  // Insert spills / restores if we are splitting.
2030  if (!TrySplit) {
2031    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2032    return NewLIs;
2033  }
2034
2035  SmallPtrSet<LiveInterval*, 4> AddedKill;
2036  SmallVector<unsigned, 2> Ops;
2037  if (NeedStackSlot) {
2038    int Id = SpillMBBs.find_first();
2039    while (Id != -1) {
2040      std::vector<SRInfo> &spills = SpillIdxes[Id];
2041      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2042        int index = spills[i].index;
2043        unsigned VReg = spills[i].vreg;
2044        LiveInterval &nI = getOrCreateInterval(VReg);
2045        bool isReMat = vrm.isReMaterialized(VReg);
2046        MachineInstr *MI = getInstructionFromIndex(index);
2047        bool CanFold = false;
2048        bool FoundUse = false;
2049        Ops.clear();
2050        if (spills[i].canFold) {
2051          CanFold = true;
2052          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2053            MachineOperand &MO = MI->getOperand(j);
2054            if (!MO.isReg() || MO.getReg() != VReg)
2055              continue;
2056
2057            Ops.push_back(j);
2058            if (MO.isDef())
2059              continue;
2060            if (isReMat ||
2061                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2062                                                RestoreMBBs, RestoreIdxes))) {
2063              // MI has two-address uses of the same register. If the use
2064              // isn't the first and only use in the BB, then we can't fold
2065              // it. FIXME: Move this to rewriteInstructionsForSpills.
2066              CanFold = false;
2067              break;
2068            }
2069            FoundUse = true;
2070          }
2071        }
2072        // Fold the store into the def if possible.
2073        bool Folded = false;
2074        if (CanFold && !Ops.empty()) {
2075          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2076            Folded = true;
2077            if (FoundUse) {
2078              // Also folded uses, do not issue a load.
2079              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2080              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2081            }
2082            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2083          }
2084        }
2085
2086        // Otherwise tell the spiller to issue a spill.
2087        if (!Folded) {
2088          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2089          bool isKill = LR->end == getStoreIndex(index);
2090          if (!MI->registerDefIsDead(nI.reg))
2091            // No need to spill a dead def.
2092            vrm.addSpillPoint(VReg, isKill, MI);
2093          if (isKill)
2094            AddedKill.insert(&nI);
2095        }
2096      }
2097      Id = SpillMBBs.find_next(Id);
2098    }
2099  }
2100
2101  int Id = RestoreMBBs.find_first();
2102  while (Id != -1) {
2103    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2104    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2105      int index = restores[i].index;
2106      if (index == -1)
2107        continue;
2108      unsigned VReg = restores[i].vreg;
2109      LiveInterval &nI = getOrCreateInterval(VReg);
2110      bool isReMat = vrm.isReMaterialized(VReg);
2111      MachineInstr *MI = getInstructionFromIndex(index);
2112      bool CanFold = false;
2113      Ops.clear();
2114      if (restores[i].canFold) {
2115        CanFold = true;
2116        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2117          MachineOperand &MO = MI->getOperand(j);
2118          if (!MO.isReg() || MO.getReg() != VReg)
2119            continue;
2120
2121          if (MO.isDef()) {
2122            // If this restore were to be folded, it would have been folded
2123            // already.
2124            CanFold = false;
2125            break;
2126          }
2127          Ops.push_back(j);
2128        }
2129      }
2130
2131      // Fold the load into the use if possible.
2132      bool Folded = false;
2133      if (CanFold && !Ops.empty()) {
2134        if (!isReMat)
2135          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2136        else {
2137          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2138          int LdSlot = 0;
2139          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2140          // If the rematerializable def is a load, also try to fold it.
2141          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2142            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2143                                          Ops, isLoadSS, LdSlot, VReg);
2144          if (!Folded) {
2145            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2146            if (ImpUse) {
2147              // Re-matting an instruction with virtual register use. Add the
2148              // register as an implicit use on the use MI and update the register
2149              // interval's spill weight to HUGE_VALF to prevent it from being
2150              // spilled.
2151              LiveInterval &ImpLi = getInterval(ImpUse);
2152              ImpLi.weight = HUGE_VALF;
2153              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2154            }
2155          }
2156        }
2157      }
2158      // If folding is not possible / failed, then tell the spiller to issue a
2159      // load / rematerialization for us.
2160      if (Folded)
2161        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2162      else
2163        vrm.addRestorePoint(VReg, MI);
2164    }
2165    Id = RestoreMBBs.find_next(Id);
2166  }
2167
2168  // Finalize intervals: add kills, finalize spill weights, and filter out
2169  // dead intervals.
2170  std::vector<LiveInterval*> RetNewLIs;
2171  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2172    LiveInterval *LI = NewLIs[i];
2173    if (!LI->empty()) {
2174      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2175      if (!AddedKill.count(LI)) {
2176        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2177        unsigned LastUseIdx = getBaseIndex(LR->end);
2178        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2179        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2180        assert(UseIdx != -1);
2181        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2182          LastUse->getOperand(UseIdx).setIsKill();
2183          vrm.addKillPoint(LI->reg, LastUseIdx);
2184        }
2185      }
2186      RetNewLIs.push_back(LI);
2187    }
2188  }
2189
2190  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2191  return RetNewLIs;
2192}
2193
2194/// hasAllocatableSuperReg - Return true if the specified physical register has
2195/// any super register that's allocatable.
2196bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2197  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2198    if (allocatableRegs_[*AS] && hasInterval(*AS))
2199      return true;
2200  return false;
2201}
2202
2203/// getRepresentativeReg - Find the largest super register of the specified
2204/// physical register.
2205unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2206  // Find the largest super-register that is allocatable.
2207  unsigned BestReg = Reg;
2208  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2209    unsigned SuperReg = *AS;
2210    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2211      BestReg = SuperReg;
2212      break;
2213    }
2214  }
2215  return BestReg;
2216}
2217
2218/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
2220unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2221                                                   unsigned PhysReg) const {
2222  unsigned NumConflicts = 0;
2223  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2224  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2225         E = mri_->reg_end(); I != E; ++I) {
2226    MachineOperand &O = I.getOperand();
2227    MachineInstr *MI = O.getParent();
2228    unsigned Index = getInstructionIndex(MI);
2229    if (pli.liveAt(Index))
2230      ++NumConflicts;
2231  }
2232  return NumConflicts;
2233}
2234
2235/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2236/// around all defs and uses of the specified interval. Return true if it
2237/// was able to cut its interval.
2238bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2239                                            unsigned PhysReg, VirtRegMap &vrm) {
2240  unsigned SpillReg = getRepresentativeReg(PhysReg);
2241
2242  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register that aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
2246    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2247           tri_->isSuperRegister(*AS, SpillReg));
2248
2249  bool Cut = false;
2250  LiveInterval &pli = getInterval(SpillReg);
2251  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2252  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2253         E = mri_->reg_end(); I != E; ++I) {
2254    MachineOperand &O = I.getOperand();
2255    MachineInstr *MI = O.getParent();
2256    if (SeenMIs.count(MI))
2257      continue;
2258    SeenMIs.insert(MI);
2259    unsigned Index = getInstructionIndex(MI);
2260    if (pli.liveAt(Index)) {
2261      vrm.addEmergencySpill(SpillReg, MI);
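      // Carve the instruction's entire slot group [load, store] out of the
      // physical register's live interval.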
2262      unsigned StartIdx = getLoadIndex(Index);
2263      unsigned EndIdx = getStoreIndex(Index)+1;
2264      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2265        pli.removeRange(StartIdx, EndIdx);
2266        Cut = true;
2267      } else {
2268        cerr << "Ran out of registers during register allocation!\n";
2269        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2270          cerr << "Please check your inline asm statement for invalid "
2271               << "constraints:\n";
2272          MI->print(cerr.stream(), tm_);
2273        }
2274        exit(1);
2275      }
2276      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2277        if (!hasInterval(*AS))
2278          continue;
2279        LiveInterval &spli = getInterval(*AS);
2280        if (spli.liveAt(Index))
2281          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2282      }
2283    }
2284  }
2285  return Cut;
2286}
2287
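/// addLiveRangeToEndOfBlock - Add a live range for reg from startInst's def
/// slot to the end of its basic block, marking the value as having a PHI
/// kill at the block boundary.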
2288LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2289                                                   MachineInstr* startInst) {
2290  LiveInterval& Interval = getOrCreateInterval(reg);
2291  VNInfo* VN = Interval.getNextValue(
2292            getInstructionIndex(startInst) + InstrSlots::DEF,
2293            startInst, true, getVNInfoAllocator());
2294  VN->setHasPHIKill(true);
2295  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2296  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2297               getMBBEndIdx(startInst->getParent()) + 1, VN);
2298  Interval.addRange(LR);
2299
2300  return LR;
2301}
2302