LiveIntervalAnalysis.cpp revision 378445303b10b092a898a75131141a8259cff50b
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
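// For example (an illustrative sketch; the vreg numbers are made up):
//   %reg1024 = IMPLICIT_DEF
//   %reg1025 = MOV %reg1024<kill>
// The MOV is just a copy of an implicit_def'd register, so it is rewritten
// in place into "%reg1025 = IMPLICIT_DEF" and any remaining uses of
// %reg1024 are marked <undef>.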
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        MI->getOperand(0).setIsUndef();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // The use is a copy; just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        // Physical registers are not liveout (yet).
        continue;
      if (!ImpDefRegs.count(Reg))
        continue;

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;
        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        MachineInstrBuilder MIB =
          BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
                  tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
        (*MIB).getOperand(0).setIsUndef();
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

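// A note on the numbering scheme used below: each instruction is assigned a
// block of InstrSlots::NUM consecutive indices, subdivided into LOAD, USE,
// DEF and STORE slots (see the InstrSlots enum in LiveIntervalAnalysis.h).
// For example, with NUM == 4 an instruction at base index 12 has its use
// slot at 13 and its def slot at 14; getBaseIndex(), getUseIndex() and
// getDefIndex() convert between these.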
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        (void) inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      (void) inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      (void) inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
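        // For example, a start index whose offset is the LOAD slot denotes
        // a range that begins at a basic block boundary (a live-in), so it
        // is remapped below to the new start index of the matching block.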
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points,
  //  * scale all ranges,
  //  * update VNInfo structures,
  //  * scale instruction numberings.
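  //
  // A factor greater than one opens uniform gaps between the existing
  // indices, so callers can number newly inserted instructions without a
  // full renumbering pass; the relative order of existing instructions is
  // unchanged.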

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = MI->first;
  }
}


/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined at any point within the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
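    // For example, a vreg defined at def slot d and killed by a later
    // instruction in the same block at use slot k gets the single range
    // [d, k+1).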
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this
    // is the result of two-address elimination, then the vreg is one of
    // the def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // flags are copied below
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // Copy the old value's flags.

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. The register is then essentially dead at the defining
          // instruction. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only cases where we should reach here with a dead physreg are if it
  // is live-in to the function and never used, or if its implicit use was
  // deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it is killed, it
  // should not be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // The register is then essentially dead at the defining instruction.
      // Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, over
/// which a variable is live.
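//
// (Concretely, with the slot scheme used in this file, a value defined at
// the DEF slot of one instruction and last read at the USE slot of another
// is live over [defSlot(def), useSlot(kill)+1), mirroring the defSlot/useSlot
// notation used in the handlers above.)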
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
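  // Note: this assumes MI's index lies within some live range of li, i.e.
  // the FindLiveRangeContaining(Index) call below does not return end().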
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  // FIXME: For now, avoid remating instructions whose definition has a subreg
  // index. It's just incredibly difficult to get right.
  if (MI->findRegisterDefOperand(li.reg)->getSubReg())
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the remat
          // mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the defining MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
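//
// MRInfo is assembled from VirtRegMap::isMod / isRef bits: a plain def adds
// isMod, a plain use adds isRef, and a use tied to a def is recorded as
// isModRef but deliberately left out of FoldOps.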
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If successful, MI is updated with the newly created MI
/// and the function returns true.
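//
// An illustrative sketch (x86-flavored pseudo-operands, not taken from this
// function): when Reg lives in stack slot <fi#0>, a successful fold rewrites
//   %reg1024 = MOV32rm <fi#0>        ; reload
//   ADD32rr %reg1025, %reg1024
// into the single instruction
//   ADD32rm %reg1025, <fi#0>
// so no separate reload has to stay in the instruction stream.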
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
1411
1412bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1413  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1414  for (LiveInterval::Ranges::const_iterator
1415         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1416    std::vector<IdxMBBPair>::const_iterator II =
1417      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1418    if (II == Idx2MBBMap.end())
1419      continue;
1420    if (I->end > II->first)  // crossing an MBB boundary.
1421      return false;
1422    MBBs.insert(II->second);
1423    if (MBBs.size() > 1)
1424      return false;
1425  }
1426  return true;
1427}
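
// How the check above works (an informal sketch): Idx2MBBMap is sorted by
// block start index, so lower_bound finds the first block whose start index
// is >= the range's start. If the range's end reaches past that boundary,
// the range crosses into another block; otherwise the nearest such block is
// recorded, and seeing a second distinct block across ranges means the
// interval spans more than one MBB.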
1428
1429/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
1430/// interval on to-be-rematerialized operands of MI) with the new register.
1431void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1432                                       MachineInstr *MI, unsigned NewVReg,
1433                                       VirtRegMap &vrm) {
1434  // There is an implicit use. That means one of the other operands is
1435  // being remat'ed and the remat'ed instruction has li.reg as a
1436  // use operand. Make sure we rewrite that as well.
1437  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1438    MachineOperand &MO = MI->getOperand(i);
1439    if (!MO.isReg())
1440      continue;
1441    unsigned Reg = MO.getReg();
1442    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1443      continue;
1444    if (!vrm.isReMaterialized(Reg))
1445      continue;
1446    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1447    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1448    if (UseMO)
1449      UseMO->setReg(NewVReg);
1450  }
1451}
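
// An illustrative scenario (hypothetical vregs): suppose MI uses %reg1026,
// %reg1026 is being rematerialized, and the remat'ed def instruction itself
// reads li.reg. Once li.reg is renamed to NewVReg in MI, that remat'ed def
// would refer to a stale register, so its use operand is rewritten to
// NewVReg here as well.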
1452
1453/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1454/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1455bool LiveIntervals::
1456rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1457                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1458                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1459                 unsigned Slot, int LdSlot,
1460                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1461                 VirtRegMap &vrm,
1462                 const TargetRegisterClass* rc,
1463                 SmallVector<int, 4> &ReMatIds,
1464                 const MachineLoopInfo *loopInfo,
1465                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1466                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1467                 std::vector<LiveInterval*> &NewLIs) {
1468  bool CanFold = false;
1469 RestartInstruction:
1470  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1471    MachineOperand& mop = MI->getOperand(i);
1472    if (!mop.isReg())
1473      continue;
1474    unsigned Reg = mop.getReg();
1475    unsigned RegI = Reg;
1476    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1477      continue;
1478    if (Reg != li.reg)
1479      continue;
1480
1481    bool TryFold = !DefIsReMat;
1482    bool FoldSS = true; // Default behavior unless it's a remat.
1483    int FoldSlot = Slot;
1484    if (DefIsReMat) {
1485      // If this is the rematerializable definition MI itself and
1486      // all of its uses are rematerialized, simply delete it.
1487      if (MI == ReMatOrigDefMI && CanDelete) {
1488        DOUT << "\t\t\t\tErasing re-materializable def: ";
1489        DOUT << MI << '\n';
1490        RemoveMachineInstrFromMaps(MI);
1491        vrm.RemoveMachineInstrFromMaps(MI);
1492        MI->eraseFromParent();
1493        break;
1494      }
1495
1496      // If def for this use can't be rematerialized, then try folding.
1497      // If def is rematerializable and it's a load, also try folding.
1498      TryFold = !ReMatDefMI || MI == ReMatOrigDefMI || isLoad;
1499      if (isLoad) {
1500        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1501        FoldSS = isLoadSS;
1502        FoldSlot = LdSlot;
1503      }
1504    }
1505
1506    // Scan all of the operands of this instruction rewriting operands
1507    // to use NewVReg instead of li.reg as appropriate.  We do this for
1508    // two reasons:
1509    //
1510    //   1. If the instr reads the same spilled vreg multiple times, we
1511    //      want to reuse the NewVReg.
1512    //   2. If the instr is a two-addr instruction, we are required to
1513    //      keep the src/dst regs pinned.
1514    //
1515    // Keep track of whether we replace a use and/or def so that we can
1516    // create the spill interval with the appropriate range.
1517
1518    HasUse = mop.isUse();
1519    HasDef = mop.isDef();
1520    SmallVector<unsigned, 2> Ops;
1521    Ops.push_back(i);
1522    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1523      const MachineOperand &MOj = MI->getOperand(j);
1524      if (!MOj.isReg())
1525        continue;
1526      unsigned RegJ = MOj.getReg();
1527      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1528        continue;
1529      if (RegJ == RegI) {
1530        Ops.push_back(j);
1531        HasUse |= MOj.isUse();
1532        HasDef |= MOj.isDef();
1533      }
1534    }
1535
1536    if (HasUse && !li.liveAt(getUseIndex(index)))
1537      // Must be defined by an implicit def. It should not be spilled. Note,
1538      // this is for correctness reasons. e.g.
1539      // 8   %reg1024<def> = IMPLICIT_DEF
1540      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1541      // The live range [12, 14) is not part of the r1024 live interval since
1542      // it's defined by an implicit def. It will not conflict with the live
1543      // interval of r1025. Now suppose both registers are spilled; you can
1544      // easily see a situation where both registers are reloaded before
1545      // the INSERT_SUBREG and are assigned target registers that overlap.
1546      HasUse = false;
1547
1548    // Create a new virtual register for the spill interval.
1549    // Create the new register now so we can map the fold instruction
1550    // to the new register so when it is unfolded we get the correct
1551    // answer.
1552    bool CreatedNewVReg = false;
1553    if (NewVReg == 0) {
1554      NewVReg = mri_->createVirtualRegister(rc);
1555      vrm.grow();
1556      CreatedNewVReg = true;
1557    }
1558
1559    if (!TryFold)
1560      CanFold = false;
1561    else {
1562      // Do not fold load / store here if we are splitting. We'll find an
1563      // optimal point to insert a load / store later.
1564      if (!TrySplit) {
1565        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1566                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1567          // Folding the load/store can completely change the instruction in
1568          // unpredictable ways; rescan it from the beginning.
1569
1570          if (FoldSS) {
1571            // We need to give the new vreg the same stack slot as the
1572            // spilled interval.
1573            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1574          }
1575
1576          HasUse = false;
1577          HasDef = false;
1578          CanFold = false;
1579          if (isNotInMIMap(MI))
1580            break;
1581          goto RestartInstruction;
1582        }
1583      } else {
1584        // We'll try to fold it later if it's profitable.
1585        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1586      }
1587    }
1588
1589    mop.setReg(NewVReg);
1590    if (mop.isImplicit())
1591      rewriteImplicitOps(li, MI, NewVReg, vrm);
1592
1593    // Reuse NewVReg for other reads.
1594    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1595      MachineOperand &mopj = MI->getOperand(Ops[j]);
1596      mopj.setReg(NewVReg);
1597      if (mopj.isImplicit())
1598        rewriteImplicitOps(li, MI, NewVReg, vrm);
1599    }
1600
1601    if (CreatedNewVReg) {
1602      if (DefIsReMat) {
1603        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1604        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1605          // Each valnum may have its own remat id.
1606          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1607        } else {
1608          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1609        }
1610        if (!CanDelete || (HasUse && HasDef)) {
1611          // If this is a two-addr instruction then its use operands are
1612          // rematerializable but its def is not. It should be assigned a
1613          // stack slot.
1614          vrm.assignVirt2StackSlot(NewVReg, Slot);
1615        }
1616      } else {
1617        vrm.assignVirt2StackSlot(NewVReg, Slot);
1618      }
1619    } else if (HasUse && HasDef &&
1620               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1621      // If this interval hasn't been assigned a stack slot (because earlier
1622      // def is a deleted remat def), do it now.
1623      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1624      vrm.assignVirt2StackSlot(NewVReg, Slot);
1625    }
1626
1627    // Re-matting an instruction with virtual register use. Add the
1628    // register as an implicit use on the use MI.
1629    if (DefIsReMat && ImpUse)
1630      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1631
1632    // Create a new register interval for this spill / remat.
1633    LiveInterval &nI = getOrCreateInterval(NewVReg);
1634    if (CreatedNewVReg) {
1635      NewLIs.push_back(&nI);
1636      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1637      if (TrySplit)
1638        vrm.setIsSplitFromReg(NewVReg, li.reg);
1639    }
1640
1641    if (HasUse) {
1642      if (CreatedNewVReg) {
1643        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1644                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1645        DOUT << " +" << LR;
1646        nI.addRange(LR);
1647      } else {
1648        // Extend the split live interval to this def / use.
1649        unsigned End = getUseIndex(index)+1;
1650        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1651                     nI.getValNumInfo(nI.getNumValNums()-1));
1652        DOUT << " +" << LR;
1653        nI.addRange(LR);
1654      }
1655    }
1656    if (HasDef) {
1657      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1658                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1659      DOUT << " +" << LR;
1660      nI.addRange(LR);
1661    }
1662
1663    DOUT << "\t\t\t\tAdded new interval: ";
1664    nI.print(DOUT, tri_);
1665    DOUT << '\n';
1666  }
1667  return CanFold;
1668}
1669bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1670                                   const VNInfo *VNI,
1671                                   MachineBasicBlock *MBB, unsigned Idx) const {
1672  unsigned End = getMBBEndIdx(MBB);
1673  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1674    if (VNI->kills[j].isPHIKill)
1675      continue;
1676
1677    unsigned KillIdx = VNI->kills[j].killIdx;
1678    if (KillIdx > Idx && KillIdx < End)
1679      return true;
1680  }
1681  return false;
1682}
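
// A small worked example (hypothetical indices): with MBB covering
// [Start, End) and Idx = 100, a non-PHI kill recorded at index 108 with
// 108 < End makes this return true; PHI kills, kills at or before Idx, and
// kills in later blocks are ignored.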
1683
1684/// RewriteInfo - Keep track of machine instrs that will be rewritten
1685/// during spilling.
1686namespace {
1687  struct RewriteInfo {
1688    unsigned Index;
1689    MachineInstr *MI;
1690    bool HasUse;
1691    bool HasDef;
1692    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1693      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1694  };
1695
1696  struct RewriteInfoCompare {
1697    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1698      return LHS.Index < RHS.Index;
1699    }
1700  };
1701}
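
// For intuition (illustrative contents): after the std::sort below,
// RewriteMIs might look like
//   { {20, MI1, use}, {36, MI2, def}, {36, MI2, use}, {52, MI3, use} }
// i.e. entries are ordered by instruction index, and an instruction that
// both defs and uses li.reg contributes multiple adjacent entries.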
1702
1703void LiveIntervals::
1704rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1705                    LiveInterval::Ranges::const_iterator &I,
1706                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1707                    unsigned Slot, int LdSlot,
1708                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1709                    VirtRegMap &vrm,
1710                    const TargetRegisterClass* rc,
1711                    SmallVector<int, 4> &ReMatIds,
1712                    const MachineLoopInfo *loopInfo,
1713                    BitVector &SpillMBBs,
1714                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1715                    BitVector &RestoreMBBs,
1716                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1717                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1718                    std::vector<LiveInterval*> &NewLIs) {
1719  bool AllCanFold = true;
1720  unsigned NewVReg = 0;
1721  unsigned start = getBaseIndex(I->start);
1722  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1723
1724  // First collect all the defs / uses in this live range that will be rewritten.
1725  // Make sure they are sorted according to instruction index.
1726  std::vector<RewriteInfo> RewriteMIs;
1727  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1728         re = mri_->reg_end(); ri != re; ) {
1729    MachineInstr *MI = &*ri;
1730    MachineOperand &O = ri.getOperand();
1731    ++ri;
1732    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1733    unsigned index = getInstructionIndex(MI);
1734    if (index < start || index >= end)
1735      continue;
1736    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1737      // Must be defined by an implicit def. It should not be spilled. Note,
1738      // this is for correctness reasons. e.g.
1739      // 8   %reg1024<def> = IMPLICIT_DEF
1740      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1741      // The live range [12, 14) is not part of the r1024 live interval since
1742      // it's defined by an implicit def. It will not conflict with the live
1743      // interval of r1025. Now suppose both registers are spilled; you can
1744      // easily see a situation where both registers are reloaded before
1745      // the INSERT_SUBREG and are assigned target registers that overlap.
1746      continue;
1747    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1748  }
1749  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1750
1751  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1752  // Now rewrite the defs and uses.
1753  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1754    RewriteInfo &rwi = RewriteMIs[i];
1755    ++i;
1756    unsigned index = rwi.Index;
1757    bool MIHasUse = rwi.HasUse;
1758    bool MIHasDef = rwi.HasDef;
1759    MachineInstr *MI = rwi.MI;
1760    // If MI defs and/or uses the same register multiple times, then there
1761    // are multiple entries.
1762    unsigned NumUses = MIHasUse;
1763    while (i != e && RewriteMIs[i].MI == MI) {
1764      assert(RewriteMIs[i].Index == index);
1765      bool isUse = RewriteMIs[i].HasUse;
1766      if (isUse) ++NumUses;
1767      MIHasUse |= isUse;
1768      MIHasDef |= RewriteMIs[i].HasDef;
1769      ++i;
1770    }
1771    MachineBasicBlock *MBB = MI->getParent();
1772
1773    if (ImpUse && MI != ReMatDefMI) {
1774      // Re-matting an instruction with virtual register use. Update the
1775      // register interval's spill weight to HUGE_VALF to prevent it from
1776      // being spilled.
1777      LiveInterval &ImpLi = getInterval(ImpUse);
1778      ImpLi.weight = HUGE_VALF;
1779    }
1780
1781    unsigned MBBId = MBB->getNumber();
1782    unsigned ThisVReg = 0;
1783    if (TrySplit) {
1784      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1785      if (NVI != MBBVRegsMap.end()) {
1786        ThisVReg = NVI->second;
1787        // One common case:
1788        // x = use
1789        // ...
1790        // ...
1791        // def = ...
1792        //     = use
1793        // It's better to start a new interval to avoid artificially
1794        // extending the previous one.
1795        if (MIHasDef && !MIHasUse) {
1796          MBBVRegsMap.erase(MBB->getNumber());
1797          ThisVReg = 0;
1798        }
1799      }
1800    }
1801
1802    bool IsNew = ThisVReg == 0;
1803    if (IsNew) {
1804      // This ends the previous live interval. If all of its defs / uses
1805      // can be folded, give it a low spill weight.
1806      if (NewVReg && TrySplit && AllCanFold) {
1807        LiveInterval &nI = getOrCreateInterval(NewVReg);
1808        nI.weight /= 10.0F;
1809      }
1810      AllCanFold = true;
1811    }
1812    NewVReg = ThisVReg;
1813
1814    bool HasDef = false;
1815    bool HasUse = false;
1816    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1817                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1818                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1819                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1820                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1821    if (!HasDef && !HasUse)
1822      continue;
1823
1824    AllCanFold &= CanFold;
1825
1826    // Update weight of spill interval.
1827    LiveInterval &nI = getOrCreateInterval(NewVReg);
1828    if (!TrySplit) {
1829      // The spill weight is now infinity as it cannot be spilled again.
1830      nI.weight = HUGE_VALF;
1831      continue;
1832    }
1833
1834    // Keep track of the last def and first use in each MBB.
1835    if (HasDef) {
1836      if (MI != ReMatOrigDefMI || !CanDelete) {
1837        bool HasKill = false;
1838        if (!HasUse)
1839          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1840        else {
1841          // If this is two-address code, then this index starts a new VNInfo.
1842          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1843          if (VNI)
1844            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1845        }
1846        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1847          SpillIdxes.find(MBBId);
1848        if (!HasKill) {
1849          if (SII == SpillIdxes.end()) {
1850            std::vector<SRInfo> S;
1851            S.push_back(SRInfo(index, NewVReg, true));
1852            SpillIdxes.insert(std::make_pair(MBBId, S));
1853          } else if (SII->second.back().vreg != NewVReg) {
1854            SII->second.push_back(SRInfo(index, NewVReg, true));
1855          } else if ((int)index > SII->second.back().index) {
1856            // If there is an earlier def and this is a two-address
1857            // instruction, then it's not possible to fold the store (which
1858            // would also fold the load).
1859            SRInfo &Info = SII->second.back();
1860            Info.index = index;
1861            Info.canFold = !HasUse;
1862          }
1863          SpillMBBs.set(MBBId);
1864        } else if (SII != SpillIdxes.end() &&
1865                   SII->second.back().vreg == NewVReg &&
1866                   (int)index > SII->second.back().index) {
1867          // There is an earlier def that's not killed (must be two-address).
1868          // The spill is no longer needed.
1869          SII->second.pop_back();
1870          if (SII->second.empty()) {
1871            SpillIdxes.erase(MBBId);
1872            SpillMBBs.reset(MBBId);
1873          }
1874        }
1875      }
1876    }
1877
1878    if (HasUse) {
1879      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1880        SpillIdxes.find(MBBId);
1881      if (SII != SpillIdxes.end() &&
1882          SII->second.back().vreg == NewVReg &&
1883          (int)index > SII->second.back().index)
1884        // There are use(s) following the last def; it's not safe to fold the spill.
1885        SII->second.back().canFold = false;
1886      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1887        RestoreIdxes.find(MBBId);
1888      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1889        // If we are splitting live intervals, only fold if it's the first
1890        // use and there isn't another use later in the MBB.
1891        RII->second.back().canFold = false;
1892      else if (IsNew) {
1893        // Only need a reload if there isn't an earlier def / use.
1894        if (RII == RestoreIdxes.end()) {
1895          std::vector<SRInfo> Infos;
1896          Infos.push_back(SRInfo(index, NewVReg, true));
1897          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1898        } else {
1899          RII->second.push_back(SRInfo(index, NewVReg, true));
1900        }
1901        RestoreMBBs.set(MBBId);
1902      }
1903    }
1904
1905    // Update spill weight.
1906    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1907    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
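    // Note (informal; the exact formula lives in the getSpillWeight helper in
    // the header): the added weight scales roughly as
    // (HasDef + HasUse) * 10^loopDepth, so defs / uses buried inside loops
    // make this interval far more expensive to spill again.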
1908  }
1909
1910  if (NewVReg && TrySplit && AllCanFold) {
1911    // If all of its def / use can be folded, give it a low spill weight.
1912    LiveInterval &nI = getOrCreateInterval(NewVReg);
1913    nI.weight /= 10.0F;
1914  }
1915}
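
// High-level recap (informal): after the routine above runs, SpillMBBs /
// SpillIdxes record, per block, the last def of each new vreg that still
// needs a store, and RestoreMBBs / RestoreIdxes record the first use that
// needs a reload; the canFold flags mark which of those the caller may fold
// into the instruction instead.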
1916
1917bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1918                        BitVector &RestoreMBBs,
1919                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1920  if (!RestoreMBBs[Id])
1921    return false;
1922  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1923  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1924    if (Restores[i].index == index &&
1925        Restores[i].vreg == vr &&
1926        Restores[i].canFold)
1927      return true;
1928  return false;
1929}
1930
1931void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1932                        BitVector &RestoreMBBs,
1933                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1934  if (!RestoreMBBs[Id])
1935    return;
1936  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1937  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1938    if (Restores[i].index == index && Restores[i].vreg == vr)
1939      Restores[i].index = -1;
1940}
1941
1942/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1943/// spilled and create empty intervals for their uses.
1944void
1945LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1946                                    const TargetRegisterClass* rc,
1947                                    std::vector<LiveInterval*> &NewLIs) {
1948  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1949         re = mri_->reg_end(); ri != re; ) {
1950    MachineOperand &O = ri.getOperand();
1951    MachineInstr *MI = &*ri;
1952    ++ri;
1953    if (O.isDef()) {
1954      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1955             "Register def was not rewritten?");
1956      RemoveMachineInstrFromMaps(MI);
1957      vrm.RemoveMachineInstrFromMaps(MI);
1958      MI->eraseFromParent();
1959    } else {
1960      // This must be a use of an implicit_def so it's not part of the live
1961      // interval. Create a new empty live interval for it.
1962      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1963      unsigned NewVReg = mri_->createVirtualRegister(rc);
1964      vrm.grow();
1965      vrm.setIsImplicitlyDefined(NewVReg);
1966      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1967      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1968        MachineOperand &MO = MI->getOperand(i);
1969        if (MO.isReg() && MO.getReg() == li.reg) {
1970          MO.setReg(NewVReg);
1971          MO.setIsUndef();
1972        }
1973      }
1974    }
1975  }
1976}
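
// Sketch of the effect (hypothetical vregs): given
//   8   %reg1024<def> = IMPLICIT_DEF
//   12  ...           = use %reg1024
// the IMPLICIT_DEF at 8 is erased, and the use at 12 is rewritten to a fresh
// register such as %reg1030<undef>, which gets an empty live interval and is
// marked implicitly defined in the VirtRegMap.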
1977
1978std::vector<LiveInterval*> LiveIntervals::
1979addIntervalsForSpillsFast(const LiveInterval &li,
1980                          const MachineLoopInfo *loopInfo,
1981                          VirtRegMap &vrm) {
1982  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1983
1984  std::vector<LiveInterval*> added;
1985
1986  assert(li.weight != HUGE_VALF &&
1987         "attempt to spill already spilled interval!");
1988
1989  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1990  DEBUG(li.dump());
1991  DOUT << '\n';
1992
1993  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1994
1995  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1996  while (RI != mri_->reg_end()) {
1997    MachineInstr* MI = &*RI;
1998
1999    SmallVector<unsigned, 2> Indices;
2000    bool HasUse = false;
2001    bool HasDef = false;
2002
2003    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2004      MachineOperand& mop = MI->getOperand(i);
2005      if (!mop.isReg() || mop.getReg() != li.reg) continue;
2006
2007      HasUse |= mop.isUse();
2008      HasDef |= mop.isDef();
2009
2010      Indices.push_back(i);
2011    }
2012
2013    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2014                              Indices, true, slot, li.reg)) {
2015      unsigned NewVReg = mri_->createVirtualRegister(rc);
2016      vrm.grow();
2017      vrm.assignVirt2StackSlot(NewVReg, slot);
2018
2019      // Create a new register for this spill.
2020      LiveInterval &nI = getOrCreateInterval(NewVReg);
2021
2022      // The spill weight is now infinity as it
2023      // cannot be spilled again.
2024      nI.weight = HUGE_VALF;
2025
2026      // Rewrite register operands to use the new vreg.
2027      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2028           E = Indices.end(); I != E; ++I) {
2029        MI->getOperand(*I).setReg(NewVReg);
2030
2031        if (MI->getOperand(*I).isUse())
2032          MI->getOperand(*I).setIsKill(true);
2033      }
2034
2035      // Fill in the new live interval.
2036      unsigned index = getInstructionIndex(MI);
2037      if (HasUse) {
2038        LiveRange LR(getLoadIndex(index), getUseIndex(index),
2039                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2040        DOUT << " +" << LR;
2041        nI.addRange(LR);
2042        vrm.addRestorePoint(NewVReg, MI);
2043      }
2044      if (HasDef) {
2045        LiveRange LR(getDefIndex(index), getStoreIndex(index),
2046                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2047        DOUT << " +" << LR;
2048        nI.addRange(LR);
2049        vrm.addSpillPoint(NewVReg, true, MI);
2050      }
2051
2052      added.push_back(&nI);
2053
2054      DOUT << "\t\t\t\tadded new interval: ";
2055      DEBUG(nI.dump());
2056      DOUT << '\n';
2057    }
2058
2059
2060    RI = mri_->reg_begin(li.reg);
2061  }
2062
2063  return added;
2064}
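
// Informal summary of the fast path above: each instruction touching li.reg
// either folds the stack slot directly or is rewritten to a fresh vreg whose
// interval covers just that instruction, e.g. (hypothetical vregs)
//   %reg1024 = ADD32rr %reg1024, %reg1025
// becomes, for a new vreg %reg1031 sharing li.reg's stack slot,
//   %reg1031 = ADD32rr %reg1031<kill>, %reg1025
// with a restore point recorded before the use and a spill point after the
// def for the spiller to materialize.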
2065
2066std::vector<LiveInterval*> LiveIntervals::
2067addIntervalsForSpills(const LiveInterval &li,
2068                      SmallVectorImpl<LiveInterval*> &SpillIs,
2069                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2070
2071  if (EnableFastSpilling)
2072    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2073
2074  assert(li.weight != HUGE_VALF &&
2075         "attempt to spill already spilled interval!");
2076
2077  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2078  li.print(DOUT, tri_);
2079  DOUT << '\n';
2080
2081  // Each bit specifies whether a spill is required in the MBB.
2082  BitVector SpillMBBs(mf_->getNumBlockIDs());
2083  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2084  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2085  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2086  DenseMap<unsigned,unsigned> MBBVRegsMap;
2087  std::vector<LiveInterval*> NewLIs;
2088  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2089
2090  unsigned NumValNums = li.getNumValNums();
2091  SmallVector<MachineInstr*, 4> ReMatDefs;
2092  ReMatDefs.resize(NumValNums, NULL);
2093  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2094  ReMatOrigDefs.resize(NumValNums, NULL);
2095  SmallVector<int, 4> ReMatIds;
2096  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2097  BitVector ReMatDelete(NumValNums);
2098  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2099
2100  // Spilling a split live interval. It cannot be split any further, and
2101  // it's guaranteed to be a single val# / range interval.
2102  if (vrm.getPreSplitReg(li.reg)) {
2103    vrm.setIsSplitFromReg(li.reg, 0);
2104    // Unset the split kill marker on the last use.
2105    unsigned KillIdx = vrm.getKillPoint(li.reg);
2106    if (KillIdx) {
2107      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2108      assert(KillMI && "Last use disappeared?");
2109      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2110      assert(KillOp != -1 && "Last use disappeared?");
2111      KillMI->getOperand(KillOp).setIsKill(false);
2112    }
2113    vrm.removeKillPoint(li.reg);
2114    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2115    Slot = vrm.getStackSlot(li.reg);
2116    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2117    MachineInstr *ReMatDefMI = DefIsReMat ?
2118      vrm.getReMaterializedMI(li.reg) : NULL;
2119    int LdSlot = 0;
2120    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2121    bool isLoad = isLoadSS ||
2122      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2123    bool IsFirstRange = true;
2124    for (LiveInterval::Ranges::const_iterator
2125           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2126      // If this is a split live interval with multiple ranges, it means there
2127      // are two-address instructions that re-defined the value. Only the
2128      // first def can be rematerialized!
2129      if (IsFirstRange) {
2130        // Note ReMatOrigDefMI has already been deleted.
2131        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2132                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2133                             false, vrm, rc, ReMatIds, loopInfo,
2134                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2135                             MBBVRegsMap, NewLIs);
2136      } else {
2137        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2138                             Slot, 0, false, false, false,
2139                             false, vrm, rc, ReMatIds, loopInfo,
2140                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2141                             MBBVRegsMap, NewLIs);
2142      }
2143      IsFirstRange = false;
2144    }
2145
2146    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2147    return NewLIs;
2148  }
2149
2150  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2151  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2152    TrySplit = false;
2153  if (TrySplit)
2154    ++numSplits;
2155  bool NeedStackSlot = false;
2156  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2157       i != e; ++i) {
2158    const VNInfo *VNI = *i;
2159    unsigned VN = VNI->id;
2160    if (VNI->isUnused())
2161      continue; // Dead val#.
2162    // Is the def for the val# rematerializable?
2163    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2164      ? getInstructionFromIndex(VNI->def) : 0;
2165    bool dummy;
2166    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2167      // Remember how to remat the def of this val#.
2168      ReMatOrigDefs[VN] = ReMatDefMI;
2169      // Original def may be modified so we have to make a copy here.
2170      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2171      ClonedMIs.push_back(Clone);
2172      ReMatDefs[VN] = Clone;
2173
2174      bool CanDelete = true;
2175      if (VNI->hasPHIKill()) {
2176        // A kill is a phi node; not all of its uses can be rematerialized.
2177        // It must not be deleted.
2178        CanDelete = false;
2179        // Need a stack slot if there is any live range where uses cannot be
2180        // rematerialized.
2181        NeedStackSlot = true;
2182      }
2183      if (CanDelete)
2184        ReMatDelete.set(VN);
2185    } else {
2186      // Need a stack slot if there is any live range where uses cannot be
2187      // rematerialized.
2188      NeedStackSlot = true;
2189    }
2190  }
2191
2192  // One stack slot per live interval.
2193  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2194    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2195      Slot = vrm.assignVirt2StackSlot(li.reg);
2196
2197    // This case only occurs when the prealloc splitter has already assigned
2198    // a stack slot to this vreg.
2199    else
2200      Slot = vrm.getStackSlot(li.reg);
2201  }
2202
2203  // Create new intervals and rewrite defs and uses.
2204  for (LiveInterval::Ranges::const_iterator
2205         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2206    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2207    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2208    bool DefIsReMat = ReMatDefMI != NULL;
2209    bool CanDelete = ReMatDelete[I->valno->id];
2210    int LdSlot = 0;
2211    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2212    bool isLoad = isLoadSS ||
2213      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2214    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2215                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2216                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2217                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2218                               MBBVRegsMap, NewLIs);
2219  }
2220
2221  // Insert spills / restores if we are splitting.
2222  if (!TrySplit) {
2223    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2224    return NewLIs;
2225  }
2226
2227  SmallPtrSet<LiveInterval*, 4> AddedKill;
2228  SmallVector<unsigned, 2> Ops;
2229  if (NeedStackSlot) {
2230    int Id = SpillMBBs.find_first();
2231    while (Id != -1) {
2232      std::vector<SRInfo> &spills = SpillIdxes[Id];
2233      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2234        int index = spills[i].index;
2235        unsigned VReg = spills[i].vreg;
2236        LiveInterval &nI = getOrCreateInterval(VReg);
2237        bool isReMat = vrm.isReMaterialized(VReg);
2238        MachineInstr *MI = getInstructionFromIndex(index);
2239        bool CanFold = false;
2240        bool FoundUse = false;
2241        Ops.clear();
2242        if (spills[i].canFold) {
2243          CanFold = true;
2244          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2245            MachineOperand &MO = MI->getOperand(j);
2246            if (!MO.isReg() || MO.getReg() != VReg)
2247              continue;
2248
2249            Ops.push_back(j);
2250            if (MO.isDef())
2251              continue;
2252            if (isReMat ||
2253                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2254                                                RestoreMBBs, RestoreIdxes))) {
2255              // MI has two-address uses of the same register. If the use
2256              // isn't the first and only use in the BB, then we can't fold
2257              // it. FIXME: Move this to rewriteInstructionsForSpills.
2258              CanFold = false;
2259              break;
2260            }
2261            FoundUse = true;
2262          }
2263        }
2264        // Fold the store into the def if possible.
2265        bool Folded = false;
2266        if (CanFold && !Ops.empty()) {
2267          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2268            Folded = true;
2269            if (FoundUse) {
2270              // Also folded uses, do not issue a load.
2271              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2272              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2273            }
2274            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2275          }
2276        }
2277
2278        // Otherwise tell the spiller to issue a spill.
2279        if (!Folded) {
2280          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2281          bool isKill = LR->end == getStoreIndex(index);
2282          if (!MI->registerDefIsDead(nI.reg))
2283            // No need to spill a dead def.
2284            vrm.addSpillPoint(VReg, isKill, MI);
2285          if (isKill)
2286            AddedKill.insert(&nI);
2287        }
2288      }
2289      Id = SpillMBBs.find_next(Id);
2290    }
2291  }
2292
2293  int Id = RestoreMBBs.find_first();
2294  while (Id != -1) {
2295    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2296    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2297      int index = restores[i].index;
2298      if (index == -1)
2299        continue;
2300      unsigned VReg = restores[i].vreg;
2301      LiveInterval &nI = getOrCreateInterval(VReg);
2302      bool isReMat = vrm.isReMaterialized(VReg);
2303      MachineInstr *MI = getInstructionFromIndex(index);
2304      bool CanFold = false;
2305      Ops.clear();
2306      if (restores[i].canFold) {
2307        CanFold = true;
2308        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2309          MachineOperand &MO = MI->getOperand(j);
2310          if (!MO.isReg() || MO.getReg() != VReg)
2311            continue;
2312
2313          if (MO.isDef()) {
2314            // If this restore were to be folded, it would have been folded
2315            // already.
2316            CanFold = false;
2317            break;
2318          }
2319          Ops.push_back(j);
2320        }
2321      }
2322
2323      // Fold the load into the use if possible.
2324      bool Folded = false;
2325      if (CanFold && !Ops.empty()) {
2326        if (!isReMat)
2327          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2328        else {
2329          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2330          int LdSlot = 0;
2331          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2332          // If the rematerializable def is a load, also try to fold it.
2333          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2334            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2335                                          Ops, isLoadSS, LdSlot, VReg);
2336          if (!Folded) {
2337            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2338            if (ImpUse) {
2339              // Re-matting an instruction with virtual register use. Add the
2340              // register as an implicit use on the use MI and update the register
2341              // interval's spill weight to HUGE_VALF to prevent it from being
2342              // spilled.
2343              LiveInterval &ImpLi = getInterval(ImpUse);
2344              ImpLi.weight = HUGE_VALF;
2345              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2346            }
2347          }
2348        }
2349      }
2350      // If folding is not possible / failed, then tell the spiller to issue a
2351      // load / rematerialization for us.
2352      if (Folded)
2353        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2354      else
2355        vrm.addRestorePoint(VReg, MI);
2356    }
2357    Id = RestoreMBBs.find_next(Id);
2358  }
2359
2360  // Finalize intervals: add kills, finalize spill weights, and filter out
2361  // dead intervals.
2362  std::vector<LiveInterval*> RetNewLIs;
2363  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2364    LiveInterval *LI = NewLIs[i];
2365    if (!LI->empty()) {
2366      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2367      if (!AddedKill.count(LI)) {
2368        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2369        unsigned LastUseIdx = getBaseIndex(LR->end);
2370        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2371        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2372        assert(UseIdx != -1);
2373        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2374          LastUse->getOperand(UseIdx).setIsKill();
2375          vrm.addKillPoint(LI->reg, LastUseIdx);
2376        }
2377      }
2378      RetNewLIs.push_back(LI);
2379    }
2380  }
2381
2382  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2383  return RetNewLIs;
2384}
2385
2386/// hasAllocatableSuperReg - Return true if the specified physical register has
2387/// any super register that's allocatable.
2388bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2389  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2390    if (allocatableRegs_[*AS] && hasInterval(*AS))
2391      return true;
2392  return false;
2393}
2394
2395/// getRepresentativeReg - Find the largest super register of the specified
2396/// physical register.
2397unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2398  // Find the largest super-register that is allocatable.
2399  unsigned BestReg = Reg;
2400  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2401    unsigned SuperReg = *AS;
2402    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2403      BestReg = SuperReg;
2404      break;
2405    }
2406  }
2407  return BestReg;
2408}
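
// For example (x86-flavored, illustrative): asked about AX, this walks the
// super-register list and can settle on EAX, provided EAX itself has no
// allocatable super-register with a live interval; conflicts are then
// tracked on that widest register.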
2409
2410/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2411/// specified interval that conflicts with the specified physical register.
2412unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2413                                                   unsigned PhysReg) const {
2414  unsigned NumConflicts = 0;
2415  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2416  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2417         E = mri_->reg_end(); I != E; ++I) {
2418    MachineOperand &O = I.getOperand();
2419    MachineInstr *MI = O.getParent();
2420    unsigned Index = getInstructionIndex(MI);
2421    if (pli.liveAt(Index))
2422      ++NumConflicts;
2423  }
2424  return NumConflicts;
2425}
2426
2427/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2428/// around all defs and uses of the specified interval. Return true if it
2429/// was able to cut its interval.
2430bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2431                                            unsigned PhysReg, VirtRegMap &vrm) {
2432  unsigned SpillReg = getRepresentativeReg(PhysReg);
2433
2434  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2435    // If there is a register that aliases PhysReg but is not a
2436    // sub-register of the chosen representative super register, assert,
2437    // since we can't handle it yet.
2438    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2439           tri_->isSuperRegister(*AS, SpillReg));
2440
2441  bool Cut = false;
2442  LiveInterval &pli = getInterval(SpillReg);
2443  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2444  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2445         E = mri_->reg_end(); I != E; ++I) {
2446    MachineOperand &O = I.getOperand();
2447    MachineInstr *MI = O.getParent();
2448    if (SeenMIs.count(MI))
2449      continue;
2450    SeenMIs.insert(MI);
2451    unsigned Index = getInstructionIndex(MI);
2452    if (pli.liveAt(Index)) {
2453      vrm.addEmergencySpill(SpillReg, MI);
2454      unsigned StartIdx = getLoadIndex(Index);
2455      unsigned EndIdx = getStoreIndex(Index)+1;
2456      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2457        pli.removeRange(StartIdx, EndIdx);
2458        Cut = true;
2459      } else {
2460        std::string msg;
2461        raw_string_ostream Msg(msg);
2462        Msg << "Ran out of registers during register allocation!";
2463        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2464          Msg << "\nPlease check your inline asm statement for invalid "
2465               << "constraints:\n";
2466          MI->print(Msg, tm_);
2467        }
2468        llvm_report_error(Msg.str());
2469      }
2470      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2471        if (!hasInterval(*AS))
2472          continue;
2473        LiveInterval &spli = getInterval(*AS);
2474        if (spli.liveAt(Index))
2475          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2476      }
2477    }
2478  }
2479  return Cut;
2480}
2481
2482LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2483                                                  MachineInstr* startInst) {
2484  LiveInterval& Interval = getOrCreateInterval(reg);
2485  VNInfo* VN = Interval.getNextValue(
2486            getInstructionIndex(startInst) + InstrSlots::DEF,
2487            startInst, true, getVNInfoAllocator());
2488  VN->setHasPHIKill(true);
2489  VN->kills.push_back(
2490    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2491  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2492               getMBBEndIdx(startInst->getParent()) + 1, VN);
2493  Interval.addRange(LR);
2494
2495  return LR;
2496}
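
// Usage intuition (a sketch, not from the original comments): given a def of
// `reg` by startInst, this creates a value number at the def's slot, marks
// it as reaching a PHI (killed in the block's terminator gap), and adds a
// range covering [getInstructionIndex(startInst) + InstrSlots::DEF,
// getMBBEndIdx(MBB) + 1).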
2497