// LiveIntervalAnalysis.cpp revision d2bd8627d9bac9a1c4528ea19d76ac7740d52517
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineRegisterInfo.h"
29#include "llvm/CodeGen/Passes.h"
30#include "llvm/CodeGen/PseudoSourceValue.h"
31#include "llvm/Target/TargetRegisterInfo.h"
32#include "llvm/Target/TargetInstrInfo.h"
33#include "llvm/Target/TargetMachine.h"
34#include "llvm/Target/TargetOptions.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/ADT/DepthFirstIterator.h"
38#include "llvm/ADT/SmallSet.h"
39#include "llvm/ADT/Statistic.h"
40#include "llvm/ADT/STLExtras.h"
41#include <algorithm>
42#include <limits>
43#include <cmath>
44using namespace llvm;
45
// Hidden options for help debugging.

// Disable rematerialization of trivially-recomputable values during spilling.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Allow intervals to be split at basic-block boundaries (on by default).
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
// Cap on the number of interval splits; -1 means unlimited.
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

// Enable the more aggressive (riskier) rematerialization heuristics.
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

// Use the faster, less thorough spilling path.
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Register this pass with the pass infrastructure under the name
// "liveintervals".
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
66
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// Requires AliasAnalysis and LiveVariables (both preserved), and runs after
/// PHI elimination (unless StrongPHIElim handles it) and two-address
/// conversion. Preserves loop and dominator info.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  // When the strong PHI elimination pass is enabled it performs PHI
  // elimination itself, so the standard pass is neither required nor
  // preserved in that configuration.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
83
84void LiveIntervals::releaseMemory() {
85  // Free the live intervals themselves.
86  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
87       E = r2iMap_.end(); I != E; ++I)
88    delete I->second;
89
90  MBB2IdxMap.clear();
91  Idx2MBBMap.clear();
92  mi2iMap_.clear();
93  i2miMap_.clear();
94  r2iMap_.clear();
95  terminatorGaps.clear();
96
97  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
98  VNInfoAllocator.Reset();
99  while (!ClonedMIs.empty()) {
100    MachineInstr *MI = ClonedMIs.back();
101    ClonedMIs.pop_back();
102    mf_->DeleteMachineInstr(MI);
103  }
104}
105
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  // Registers currently defined (only) by an implicit_def, and the
  // implicit_def instructions seen in the current block.
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  // Walk blocks in DFS order from the entry so defs are seen before uses
  // along any forward path.
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    // Note: I is advanced before MI is inspected so that backtracking
    // (--I below) can revisit an instruction rewritten to implicit_def.
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        // Record the implicit_def and mark its def operand undef.
        unsigned Reg = MI->getOperand(0).getReg();
        MI->getOperand(0).setIsUndef();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      // Mark every use of an implicit_def'd register as undef.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          // Strip all operands except operand 0 (the def).
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        // A kill or a tied use ends the implicit_def-only region for Reg.
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        // A real (non-implicit) def supersedes any earlier implicit_def.
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        // Physical registers are not liveout (yet).
        continue;
      if (!ImpDefRegs.count(Reg))
        continue;

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // Rewrite each out-of-block use to read a fresh vreg defined by a new
      // implicit_def placed immediately before the use.
      // Note: UI is advanced before the operand is rewritten because
      // setReg() moves the use off Reg's use list.
      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;
        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        MachineInstrBuilder MIB =
          BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
                  tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
        (*MIB).getOperand(0).setIsUndef();
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    // Implicit defs do not survive across blocks here; reset per-block state.
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}
219
220void LiveIntervals::computeNumbering() {
221  Index2MiMap OldI2MI = i2miMap_;
222  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
223
224  Idx2MBBMap.clear();
225  MBB2IdxMap.clear();
226  mi2iMap_.clear();
227  i2miMap_.clear();
228  terminatorGaps.clear();
229
230  FunctionSize = 0;
231
232  // Number MachineInstrs and MachineBasicBlocks.
233  // Initialize MBB indexes to a sentinal.
234  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
235
236  unsigned MIIndex = 0;
237  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
238       MBB != E; ++MBB) {
239    unsigned StartIdx = MIIndex;
240
241    // Insert an empty slot at the beginning of each block.
242    MIIndex += InstrSlots::NUM;
243    i2miMap_.push_back(0);
244
245    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
246         I != E; ++I) {
247
248      if (I == MBB->getFirstTerminator()) {
249        // Leave a gap for before terminators, this is where we will point
250        // PHI kills.
251        bool inserted =
252          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
253        assert(inserted &&
254               "Multiple 'first' terminators encountered during numbering.");
255        i2miMap_.push_back(0);
256
257        MIIndex += InstrSlots::NUM;
258      }
259
260      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
261      assert(inserted && "multiple MachineInstr -> index mappings");
262      inserted = true;
263      i2miMap_.push_back(I);
264      MIIndex += InstrSlots::NUM;
265      FunctionSize++;
266
267      // Insert max(1, numdefs) empty slots after every instruction.
268      unsigned Slots = I->getDesc().getNumDefs();
269      if (Slots == 0)
270        Slots = 1;
271      MIIndex += InstrSlots::NUM * Slots;
272      while (Slots--)
273        i2miMap_.push_back(0);
274    }
275
276    if (MBB->getFirstTerminator() == MBB->end()) {
277      // Leave a gap for before terminators, this is where we will point
278      // PHI kills.
279      bool inserted =
280        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
281      assert(inserted &&
282             "Multiple 'first' terminators encountered during numbering.");
283      i2miMap_.push_back(0);
284
285      MIIndex += InstrSlots::NUM;
286    }
287
288    // Set the MBB2IdxMap entry for this MBB.
289    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
290    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
291  }
292
293  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
294
295  if (!OldI2MI.empty())
296    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
297      for (LiveInterval::iterator LI = OI->second->begin(),
298           LE = OI->second->end(); LI != LE; ++LI) {
299
300        // Remap the start index of the live range to the corresponding new
301        // number, or our best guess at what it _should_ correspond to if the
302        // original instruction has been erased.  This is either the following
303        // instruction or its predecessor.
304        unsigned index = LI->start / InstrSlots::NUM;
305        unsigned offset = LI->start % InstrSlots::NUM;
306        if (offset == InstrSlots::LOAD) {
307          std::vector<IdxMBBPair>::const_iterator I =
308                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
309          // Take the pair containing the index
310          std::vector<IdxMBBPair>::const_iterator J =
311                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
312
313          LI->start = getMBBStartIdx(J->second);
314        } else {
315          LI->start = mi2iMap_[OldI2MI[index]] + offset;
316        }
317
318        // Remap the ending index in the same way that we remapped the start,
319        // except for the final step where we always map to the immediately
320        // following instruction.
321        index = (LI->end - 1) / InstrSlots::NUM;
322        offset  = LI->end % InstrSlots::NUM;
323        if (offset == InstrSlots::LOAD) {
324          // VReg dies at end of block.
325          std::vector<IdxMBBPair>::const_iterator I =
326                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
327          --I;
328
329          LI->end = getMBBEndIdx(I->second) + 1;
330        } else {
331          unsigned idx = index;
332          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
333
334          if (index != OldI2MI.size())
335            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
336          else
337            LI->end = InstrSlots::NUM * i2miMap_.size();
338        }
339      }
340
341      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
342           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
343        VNInfo* vni = *VNI;
344
345        // Remap the VNInfo def index, which works the same as the
346        // start indices above. VN's with special sentinel defs
347        // don't need to be remapped.
348        if (vni->isDefAccurate() && !vni->isUnused()) {
349          unsigned index = vni->def / InstrSlots::NUM;
350          unsigned offset = vni->def % InstrSlots::NUM;
351          if (offset == InstrSlots::LOAD) {
352            std::vector<IdxMBBPair>::const_iterator I =
353                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
354            // Take the pair containing the index
355            std::vector<IdxMBBPair>::const_iterator J =
356                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
357
358            vni->def = getMBBStartIdx(J->second);
359          } else {
360            vni->def = mi2iMap_[OldI2MI[index]] + offset;
361          }
362        }
363
364        // Remap the VNInfo kill indices, which works the same as
365        // the end indices above.
366        for (size_t i = 0; i < vni->kills.size(); ++i) {
367          unsigned killIdx = vni->kills[i].killIdx;
368
369          unsigned index = (killIdx - 1) / InstrSlots::NUM;
370          unsigned offset = killIdx % InstrSlots::NUM;
371
372          if (offset == InstrSlots::LOAD) {
373            assert("Value killed at a load slot.");
374            /*std::vector<IdxMBBPair>::const_iterator I =
375             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
376            --I;
377
378            vni->kills[i] = getMBBEndIdx(I->second);*/
379          } else {
380            if (vni->kills[i].isPHIKill) {
381              std::vector<IdxMBBPair>::const_iterator I =
382                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
383              --I;
384              vni->kills[i].killIdx = terminatorGaps[I->second];
385            } else {
386              assert(OldI2MI[index] != 0 &&
387                     "Kill refers to instruction not present in index maps.");
388              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
389            }
390
391            /*
392            unsigned idx = index;
393            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
394
395            if (index != OldI2MI.size())
396              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
397                              (idx == index ? offset : 0);
398            else
399              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
400            */
401          }
402        }
403      }
404    }
405}
406
407void LiveIntervals::scaleNumbering(int factor) {
408  // Need to
409  //  * scale MBB begin and end points
410  //  * scale all ranges.
411  //  * Update VNI structures.
412  //  * Scale instruction numberings
413
414  // Scale the MBB indices.
415  Idx2MBBMap.clear();
416  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
417       MBB != MBBE; ++MBB) {
418    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
419    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
420    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
421    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
422  }
423  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
424
425  // Scale terminator gaps.
426  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
427       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
428       TGI != TGE; ++TGI) {
429    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
430  }
431
432  // Scale the intervals.
433  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
434    LI->second->scaleNumbering(factor);
435  }
436
437  // Scale MachineInstrs.
438  Mi2IndexMap oldmi2iMap = mi2iMap_;
439  unsigned highestSlot = 0;
440  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
441       MI != ME; ++MI) {
442    unsigned newSlot = InstrSlots::scale(MI->second, factor);
443    mi2iMap_[MI->first] = newSlot;
444    highestSlot = std::max(highestSlot, newSlot);
445  }
446
447  i2miMap_.clear();
448  i2miMap_.resize(highestSlot + 1);
449  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
450       MI != ME; ++MI) {
451    i2miMap_[MI->second] = MI->first;
452  }
453
454}
455
456
457/// runOnMachineFunction - Register allocate the whole function
458///
459bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
460  mf_ = &fn;
461  mri_ = &mf_->getRegInfo();
462  tm_ = &fn.getTarget();
463  tri_ = tm_->getRegisterInfo();
464  tii_ = tm_->getInstrInfo();
465  aa_ = &getAnalysis<AliasAnalysis>();
466  lv_ = &getAnalysis<LiveVariables>();
467  allocatableRegs_ = tri_->getAllocatableSet(fn);
468
469  processImplicitDefs();
470  computeNumbering();
471  computeIntervals();
472
473  numIntervals += getNumIntervals();
474
475  DEBUG(dump());
476  return true;
477}
478
479/// print - Implement the dump method.
480void LiveIntervals::print(std::ostream &O, const Module* ) const {
481  O << "********** INTERVALS **********\n";
482  for (const_iterator I = begin(), E = end(); I != E; ++I) {
483    I->second->print(O, tri_);
484    O << "\n";
485  }
486
487  O << "********** MACHINEINSTRS **********\n";
488  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
489       mbbi != mbbe; ++mbbi) {
490    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
491    for (MachineBasicBlock::iterator mii = mbbi->begin(),
492           mie = mbbi->end(); mii != mie; ++mii) {
493      O << getInstructionIndex(mii) << '\t' << *mii;
494    }
495  }
496}
497
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
/// Note: any register operand referencing a physreg that overlaps 'reg'
/// counts as a conflict here — there is no isDef() filter below.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by each range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // Copies to/from the interval's own register are not conflicts.
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual registers through the VirtRegMap to their assigned
        // physical register, if any.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
537
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well (when CheckUse is set).  Instructions in
/// JoinedCopies are ignored.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Walk every instruction slot covered by each range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      // Copies that have already been coalesced away are not conflicts.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        // Only direct sub-register references of Reg count here.
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
577
578
579void LiveIntervals::printRegName(unsigned reg) const {
580  if (TargetRegisterInfo::isPhysicalRegister(reg))
581    cerr << tri_->getName(reg);
582  else
583    cerr << "%reg" << reg;
584}
585
/// handleVirtualRegisterDef - Extend 'interval' for the virtual-register def
/// at operand MOIdx of instruction mi (index MIIdx).  Handles the first def
/// of a vreg, redefs from two-address conversion, and redefs from PHI
/// elimination.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // implicit_defs contribute no live range.
  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Remember the defining instruction when it is any kind of copy so
    // coalescing can later look through it.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        // The remaining value is killed at the terminator gap of the block
        // it reaches, as a PHI kill.
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      // As above, remember copy instructions for later coalescing.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
796
/// handlePhysicalRegisterDef - Compute the live range created by a def of
/// the physical register interval.reg at index MIIdx and add it to
/// 'interval'. CopyMI, if non-null, is the copy instruction producing the
/// value and is recorded on the newly created value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    // Skip index slots that map to no instruction so that baseIndex stays
    // in sync with mi as we advance down the block.
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          // An early-clobber redef ends the old value at the use slot instead.
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that defines
          // it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
879
880void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
881                                      MachineBasicBlock::iterator MI,
882                                      unsigned MIIdx,
883                                      MachineOperand& MO,
884                                      unsigned MOIdx) {
885  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
886    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
887                             getOrCreateInterval(MO.getReg()));
888  else if (allocatableRegs_[MO.getReg()]) {
889    MachineInstr *CopyMI = NULL;
890    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
891    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
892        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
893        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
894        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
895      CopyMI = MI;
896    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
897                              getOrCreateInterval(MO.getReg()), CopyMI);
898    // Def of a register also defines its sub-registers.
899    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
900      // If MI also modifies the sub-register explicitly, avoid processing it
901      // more than once. Do not pass in TRI here so it checks for exact match.
902      if (!MI->modifiesRegister(*AS))
903        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
904                                  getOrCreateInterval(*AS), 0);
905  }
906}
907
/// handleLiveInRegister - Create a live range for a register that is
/// live into MBB without a visible def in it. If isAlias is true the
/// live-in was introduced for a sub-register alias, and an unused value
/// is treated as dead rather than live-through.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  // Advance past index slots with no instruction so baseIndex lines up
  // with the first real instruction (mi).
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      // Killed here: range ends just past the use slot.
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      // Keep baseIndex in sync with mi across empty index slots.
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // Alias live-in with no use: treat as dead at the start of the block.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Live through the whole scanned region.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  // The value is not defined by any instruction here; mark it as a PHI-like
  // def at the block boundary.
  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
970
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // NOTE(review): operands are walked in reverse order here — presumably
      // so implicit operands (appended after explicit ones) are seen first;
      // confirm before relying on it.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      // An instruction gets one slot group per def (minimum one), so each
      // def can have a distinct index.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
1031
1032bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
1033                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1034  std::vector<IdxMBBPair>::const_iterator I =
1035    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1036
1037  bool ResVal = false;
1038  while (I != Idx2MBBMap.end()) {
1039    if (I->first >= End)
1040      break;
1041    MBBs.push_back(I->second);
1042    ResVal = true;
1043    ++I;
1044  }
1045  return ResVal;
1046}
1047
1048bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
1049                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1050  std::vector<IdxMBBPair>::const_iterator I =
1051    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1052
1053  bool ResVal = false;
1054  while (I != Idx2MBBMap.end()) {
1055    if (I->first > End)
1056      break;
1057    MachineBasicBlock *MBB = I->second;
1058    if (getMBBEndIdx(MBB) > End)
1059      break;
1060    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1061           SE = MBB->succ_end(); SI != SE; ++SI)
1062      MBBs.push_back(*SI);
1063    ResVal = true;
1064    ++I;
1065  }
1066  return ResVal;
1067}
1068
1069LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1070  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1071  return new LiveInterval(reg, Weight);
1072}
1073
1074/// dupInterval - Duplicate a live interval. The caller is responsible for
1075/// managing the allocated memory.
1076LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1077  LiveInterval *NewLI = createInterval(li->reg);
1078  NewLI->Copy(*li, mri_, getVNInfoAllocator());
1079  return NewLI;
1080}
1081
1082/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1083/// copy field and returns the source register that defines it.
1084unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1085  if (!VNI->copy)
1086    return 0;
1087
1088  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1089    // If it's extracting out of a physical register, return the sub-register.
1090    unsigned Reg = VNI->copy->getOperand(1).getReg();
1091    if (TargetRegisterInfo::isPhysicalRegister(Reg))
1092      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
1093    return Reg;
1094  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1095             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1096    return VNI->copy->getOperand(2).getReg();
1097
1098  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1099  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
1100    return SrcReg;
1101  assert(0 && "Unrecognized copy instruction!");
1102  return 0;
1103}
1104
1105//===----------------------------------------------------------------------===//
1106// Register allocator hooks.
1107//
1108
1109/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1110/// allow one) virtual register operand, then its uses are implicitly using
1111/// the register. Returns the virtual register.
1112unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1113                                            MachineInstr *MI) const {
1114  unsigned RegOp = 0;
1115  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1116    MachineOperand &MO = MI->getOperand(i);
1117    if (!MO.isReg() || !MO.isUse())
1118      continue;
1119    unsigned Reg = MO.getReg();
1120    if (Reg == 0 || Reg == li.reg)
1121      continue;
1122
1123    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1124        !allocatableRegs_[Reg])
1125      continue;
1126    // FIXME: For now, only remat MI with at most one register operand.
1127    assert(!RegOp &&
1128           "Can't rematerialize instruction with multiple register operand!");
1129    RegOp = MO.getReg();
1130#ifndef NDEBUG
1131    break;
1132#endif
1133  }
1134  return RegOp;
1135}
1136
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  // NOTE(review): assumes li is live at MI's index — FindLiveRangeContaining
  // returns end() otherwise and this dereference would be invalid. Confirm
  // callers guarantee liveness at MI.
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  // The use index must be covered by a range carrying the same value number.
  return UI != li.end() && UI->valno == ValNo;
}
1146
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  // Global off-switch for rematerialization (debugging aid).
  if (DisableReMat)
    return false;

  // An IMPLICIT_DEF can always be rematerialized.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      // No value means we cannot reason about the memory; be conservative.
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        // Physical register accesses are not provably constant.
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one use other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      // Skip uses reached by a different value number.
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      // The implicitly used register must carry the same value at every
      // point where the remat'ed definition could be inserted.
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
1266
1267/// isReMaterializable - Returns true if the definition MI of the specified
1268/// val# of the specified interval is re-materializable.
1269bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1270                                       const VNInfo *ValNo, MachineInstr *MI) {
1271  SmallVector<LiveInterval*, 4> Dummy1;
1272  bool Dummy2;
1273  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1274}
1275
1276/// isReMaterializable - Returns true if every definition of MI of every
1277/// val# of the specified interval is re-materializable.
1278bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1279                                       SmallVectorImpl<LiveInterval*> &SpillIs,
1280                                       bool &isLoad) {
1281  isLoad = false;
1282  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1283       i != e; ++i) {
1284    const VNInfo *VNI = *i;
1285    if (VNI->isUnused())
1286      continue; // Dead val#.
1287    // Is the def for the val# rematerializable?
1288    if (!VNI->isDefAccurate())
1289      return false;
1290    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1291    bool DefIsLoad = false;
1292    if (!ReMatDefMI ||
1293        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1294      return false;
1295    isLoad |= DefIsLoad;
1296  }
1297  return true;
1298}
1299
1300/// FilterFoldedOps - Filter out two-address use operands. Return
1301/// true if it finds any issue with the operands that ought to prevent
1302/// folding.
1303static bool FilterFoldedOps(MachineInstr *MI,
1304                            SmallVector<unsigned, 2> &Ops,
1305                            unsigned &MRInfo,
1306                            SmallVector<unsigned, 2> &FoldOps) {
1307  MRInfo = 0;
1308  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1309    unsigned OpIdx = Ops[i];
1310    MachineOperand &MO = MI->getOperand(OpIdx);
1311    // FIXME: fold subreg use.
1312    if (MO.getSubReg())
1313      return true;
1314    if (MO.isDef())
1315      MRInfo |= (unsigned)VirtRegMap::isMod;
1316    else {
1317      // Filter out two-address use operand(s).
1318      if (MI->isRegTiedToDefOperand(OpIdx)) {
1319        MRInfo = VirtRegMap::isModRef;
1320        continue;
1321      }
1322      MRInfo |= (unsigned)VirtRegMap::isRef;
1323    }
1324    FoldOps.push_back(OpIdx);
1325  }
1326  return false;
1327}
1328
1329
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Fold from the stack slot (isSS) or by inlining the remat'ed def DefMI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer pending spill / restore / emergency-spill points from the
    // old instruction to the folded replacement.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Update the index maps: fmi takes over MI's slot index.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    // Replace MI in its block and hand the new instruction back to the caller.
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
1383
1384/// canFoldMemoryOperand - Returns true if the specified load / store
1385/// folding is possible.
1386bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1387                                         SmallVector<unsigned, 2> &Ops,
1388                                         bool ReMat) const {
1389  // Filter the list of operand indexes that are to be folded. Abort if
1390  // any operand will prevent folding.
1391  unsigned MRInfo = 0;
1392  SmallVector<unsigned, 2> FoldOps;
1393  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1394    return false;
1395
1396  // It's only legal to remat for a use, not a def.
1397  if (ReMat && (MRInfo & VirtRegMap::isMod))
1398    return false;
1399
1400  return tii_->canFoldMemoryOperand(MI, FoldOps);
1401}
1402
1403bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1404  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1405  for (LiveInterval::Ranges::const_iterator
1406         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1407    std::vector<IdxMBBPair>::const_iterator II =
1408      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1409    if (II == Idx2MBBMap.end())
1410      continue;
1411    if (I->end > II->first)  // crossing a MBB.
1412      return false;
1413    MBBs.insert(II->second);
1414    if (MBBs.size() > 1)
1415      return false;
1416  }
1417  return true;
1418}
1419
1420/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1421/// interval on to-be re-materialized operands of MI) with new register.
1422void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1423                                       MachineInstr *MI, unsigned NewVReg,
1424                                       VirtRegMap &vrm) {
1425  // There is an implicit use. That means one of the other operand is
1426  // being remat'ed and the remat'ed instruction has li.reg as an
1427  // use operand. Make sure we rewrite that as well.
1428  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1429    MachineOperand &MO = MI->getOperand(i);
1430    if (!MO.isReg())
1431      continue;
1432    unsigned Reg = MO.getReg();
1433    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1434      continue;
1435    if (!vrm.isReMaterialized(Reg))
1436      continue;
1437    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1438    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1439    if (UseMO)
1440      UseMO->setReg(NewVReg);
1441  }
1442}
1443
1444/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1445/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1446bool LiveIntervals::
1447rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1448                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1449                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1450                 unsigned Slot, int LdSlot,
1451                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1452                 VirtRegMap &vrm,
1453                 const TargetRegisterClass* rc,
1454                 SmallVector<int, 4> &ReMatIds,
1455                 const MachineLoopInfo *loopInfo,
1456                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1457                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1458                 std::vector<LiveInterval*> &NewLIs) {
1459  bool CanFold = false;
1460 RestartInstruction:
1461  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1462    MachineOperand& mop = MI->getOperand(i);
1463    if (!mop.isReg())
1464      continue;
1465    unsigned Reg = mop.getReg();
1466    unsigned RegI = Reg;
1467    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1468      continue;
1469    if (Reg != li.reg)
1470      continue;
1471
1472    bool TryFold = !DefIsReMat;
1473    bool FoldSS = true; // Default behavior unless it's a remat.
1474    int FoldSlot = Slot;
1475    if (DefIsReMat) {
1476      // If this is the rematerializable definition MI itself and
1477      // all of its uses are rematerialized, simply delete it.
1478      if (MI == ReMatOrigDefMI && CanDelete) {
1479        DOUT << "\t\t\t\tErasing re-materlizable def: ";
1480        DOUT << MI << '\n';
1481        RemoveMachineInstrFromMaps(MI);
1482        vrm.RemoveMachineInstrFromMaps(MI);
1483        MI->eraseFromParent();
1484        break;
1485      }
1486
1487      // If def for this use can't be rematerialized, then try folding.
1488      // If def is rematerializable and it's a load, also try folding.
1489      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1490      if (isLoad) {
1491        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1492        FoldSS = isLoadSS;
1493        FoldSlot = LdSlot;
1494      }
1495    }
1496
1497    // Scan all of the operands of this instruction rewriting operands
1498    // to use NewVReg instead of li.reg as appropriate.  We do this for
1499    // two reasons:
1500    //
1501    //   1. If the instr reads the same spilled vreg multiple times, we
1502    //      want to reuse the NewVReg.
1503    //   2. If the instr is a two-addr instruction, we are required to
1504    //      keep the src/dst regs pinned.
1505    //
1506    // Keep track of whether we replace a use and/or def so that we can
1507    // create the spill interval with the appropriate range.
1508
1509    HasUse = mop.isUse();
1510    HasDef = mop.isDef();
1511    SmallVector<unsigned, 2> Ops;
1512    Ops.push_back(i);
1513    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1514      const MachineOperand &MOj = MI->getOperand(j);
1515      if (!MOj.isReg())
1516        continue;
1517      unsigned RegJ = MOj.getReg();
1518      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1519        continue;
1520      if (RegJ == RegI) {
1521        Ops.push_back(j);
1522        HasUse |= MOj.isUse();
1523        HasDef |= MOj.isDef();
1524      }
1525    }
1526
1527    if (HasUse && !li.liveAt(getUseIndex(index)))
1528      // Must be defined by an implicit def. It should not be spilled. Note,
1529      // this is for correctness reason. e.g.
1530      // 8   %reg1024<def> = IMPLICIT_DEF
1531      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1532      // The live range [12, 14) are not part of the r1024 live interval since
1533      // it's defined by an implicit def. It will not conflicts with live
1534      // interval of r1025. Now suppose both registers are spilled, you can
1535      // easily see a situation where both registers are reloaded before
1536      // the INSERT_SUBREG and both target registers that would overlap.
1537      HasUse = false;
1538
1539    // Create a new virtual register for the spill interval.
1540    // Create the new register now so we can map the fold instruction
1541    // to the new register so when it is unfolded we get the correct
1542    // answer.
1543    bool CreatedNewVReg = false;
1544    if (NewVReg == 0) {
1545      NewVReg = mri_->createVirtualRegister(rc);
1546      vrm.grow();
1547      CreatedNewVReg = true;
1548    }
1549
1550    if (!TryFold)
1551      CanFold = false;
1552    else {
1553      // Do not fold load / store here if we are splitting. We'll find an
1554      // optimal point to insert a load / store later.
1555      if (!TrySplit) {
1556        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1557                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1558          // Folding the load/store can completely change the instruction in
1559          // unpredictable ways, rescan it from the beginning.
1560
1561          if (FoldSS) {
1562            // We need to give the new vreg the same stack slot as the
1563            // spilled interval.
1564            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1565          }
1566
1567          HasUse = false;
1568          HasDef = false;
1569          CanFold = false;
1570          if (isNotInMIMap(MI))
1571            break;
1572          goto RestartInstruction;
1573        }
1574      } else {
1575        // We'll try to fold it later if it's profitable.
1576        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1577      }
1578    }
1579
1580    mop.setReg(NewVReg);
1581    if (mop.isImplicit())
1582      rewriteImplicitOps(li, MI, NewVReg, vrm);
1583
1584    // Reuse NewVReg for other reads.
1585    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1586      MachineOperand &mopj = MI->getOperand(Ops[j]);
1587      mopj.setReg(NewVReg);
1588      if (mopj.isImplicit())
1589        rewriteImplicitOps(li, MI, NewVReg, vrm);
1590    }
1591
1592    if (CreatedNewVReg) {
1593      if (DefIsReMat) {
1594        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1595        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1596          // Each valnum may have its own remat id.
1597          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1598        } else {
1599          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1600        }
1601        if (!CanDelete || (HasUse && HasDef)) {
1602          // If this is a two-addr instruction then its use operands are
1603          // rematerializable but its def is not. It should be assigned a
1604          // stack slot.
1605          vrm.assignVirt2StackSlot(NewVReg, Slot);
1606        }
1607      } else {
1608        vrm.assignVirt2StackSlot(NewVReg, Slot);
1609      }
1610    } else if (HasUse && HasDef &&
1611               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1612      // If this interval hasn't been assigned a stack slot (because earlier
1613      // def is a deleted remat def), do it now.
1614      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1615      vrm.assignVirt2StackSlot(NewVReg, Slot);
1616    }
1617
1618    // Re-matting an instruction with virtual register use. Add the
1619    // register as an implicit use on the use MI.
1620    if (DefIsReMat && ImpUse)
1621      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1622
1623    // Create a new register interval for this spill / remat.
1624    LiveInterval &nI = getOrCreateInterval(NewVReg);
1625    if (CreatedNewVReg) {
1626      NewLIs.push_back(&nI);
1627      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1628      if (TrySplit)
1629        vrm.setIsSplitFromReg(NewVReg, li.reg);
1630    }
1631
1632    if (HasUse) {
1633      if (CreatedNewVReg) {
1634        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1635                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1636        DOUT << " +" << LR;
1637        nI.addRange(LR);
1638      } else {
1639        // Extend the split live interval to this def / use.
1640        unsigned End = getUseIndex(index)+1;
1641        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1642                     nI.getValNumInfo(nI.getNumValNums()-1));
1643        DOUT << " +" << LR;
1644        nI.addRange(LR);
1645      }
1646    }
1647    if (HasDef) {
1648      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1649                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1650      DOUT << " +" << LR;
1651      nI.addRange(LR);
1652    }
1653
1654    DOUT << "\t\t\t\tAdded new interval: ";
1655    nI.print(DOUT, tri_);
1656    DOUT << '\n';
1657  }
1658  return CanFold;
1659}
1660bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1661                                   const VNInfo *VNI,
1662                                   MachineBasicBlock *MBB, unsigned Idx) const {
1663  unsigned End = getMBBEndIdx(MBB);
1664  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1665    if (VNI->kills[j].isPHIKill)
1666      continue;
1667
1668    unsigned KillIdx = VNI->kills[j].killIdx;
1669    if (KillIdx > Idx && KillIdx < End)
1670      return true;
1671  }
1672  return false;
1673}
1674
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  // One def and/or use operand occurrence of the register being spilled.
  // Records are collected per live range and then sorted by Index so the
  // rewriter can visit instructions in program order.
  struct RewriteInfo {
    unsigned Index;    // Instruction index of MI.
    MachineInstr *MI;  // The instruction containing the operand.
    bool HasUse;       // The operand reads the register.
    bool HasDef;       // The operand writes the register.
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  // Strict weak ordering by ascending instruction index, for std::sort.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1693
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// the live range [I->start, I->end) for spilling (and, if TrySplit is set,
/// for live interval splitting). New virtual registers created for the
/// rewritten operands are appended to NewLIs, and the per-MBB spill /
/// restore points needed by the spiller are recorded in SpillIdxes /
/// RestoreIdxes (with the corresponding bits set in SpillMBBs /
/// RestoreMBBs).
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range boundaries out to whole-instruction granularity.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before inspecting; the operand may be rewritten later.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    // Skip instructions outside the range being rewritten.
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // If the def is rematerializable and reads another virtual register,
  // remember it so that register can be kept live across the remat points.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the new vreg already created for an earlier
      // def / use within the same basic block.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    // Rewrite this instruction's operands (may create / reuse NewVReg).
    // HasDef / HasUse report what the instruction actually did after
    // rewriting (folding may clear both).
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // The value is live out of this block, so a spill is needed after
          // the (last) def in the block.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1907
1908bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1909                        BitVector &RestoreMBBs,
1910                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1911  if (!RestoreMBBs[Id])
1912    return false;
1913  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1914  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1915    if (Restores[i].index == index &&
1916        Restores[i].vreg == vr &&
1917        Restores[i].canFold)
1918      return true;
1919  return false;
1920}
1921
1922void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1923                        BitVector &RestoreMBBs,
1924                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1925  if (!RestoreMBBs[Id])
1926    return;
1927  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1928  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1929    if (Restores[i].index == index && Restores[i].vreg)
1930      Restores[i].index = -1;
1931}
1932
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk the remaining def / use operands of li.reg. The iterator is
  // advanced before any mutation because erasing or rewriting the current
  // instruction removes it from li.reg's use list.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      // Any def of li.reg still left at this point must be an IMPLICIT_DEF;
      // delete it and drop it from the index maps.
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of this instruction that references li.reg
      // to the new vreg, marking it undef.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1968
/// addIntervalsForSpillsFast - Quick spilling path: assign the interval one
/// stack slot, then for each instruction that touches li.reg either fold the
/// memory access into the instruction or rewrite it to use a fresh virtual
/// register with a tiny load/store live range around it. No splitting or
/// rematerialization is attempted. Returns the new intervals created.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Process one instruction from li.reg's use list per iteration; the walk
  // restarts from the beginning each time because the instruction just
  // handled is removed from the use list (folded away or rewritten to a
  // new vreg).
  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Collect every operand of MI that refers to li.reg.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    // Try to fold the stack slot access directly into MI; if that fails,
    // rewrite the operands to a fresh vreg with its own reload/spill.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        // The new vreg lives only within this instruction, so uses kill it.
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in  the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';
    }


    // Restart the use-list walk; MI no longer references li.reg.
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
2056
/// addIntervalsForSpills - Spill (and possibly split) the given live
/// interval. Determines which val#s can be rematerialized, assigns a stack
/// slot if one is needed, rewrites every def / use, then (when splitting)
/// decides where spills and restores must be issued, attempting to fold
/// them into the surrounding instructions. Returns the new live intervals
/// the register allocator must handle.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization state, indexed by VNInfo::id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  // Splitting is only worthwhile when the interval spans multiple blocks,
  // and is capped by the -split-limit debugging option.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Visit every block that needs a spill and either fold the store into
    // the defining instruction or tell the spiller to insert one.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Visit every block that needs a restore and either fold the load (or the
  // rematerialized def) into the using instruction or tell the spiller to
  // insert a reload / remat.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      // Entries invalidated by eraseRestoreInfo are marked with -1.
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the register
              // interval's spill weight to HUGE_VALF to prevent it from being
              // spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the weight by the interval's approximate size.
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // Mark the last use as a kill, unless it is tied to a def
        // (two-address), in which case the value lives through it.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
2376
2377/// hasAllocatableSuperReg - Return true if the specified physical register has
2378/// any super register that's allocatable.
2379bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2380  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2381    if (allocatableRegs_[*AS] && hasInterval(*AS))
2382      return true;
2383  return false;
2384}
2385
2386/// getRepresentativeReg - Find the largest super register of the specified
2387/// physical register.
2388unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2389  // Find the largest super-register that is allocatable.
2390  unsigned BestReg = Reg;
2391  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2392    unsigned SuperReg = *AS;
2393    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2394      BestReg = SuperReg;
2395      break;
2396    }
2397  }
2398  return BestReg;
2399}
2400
2401/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2402/// specified interval that conflicts with the specified physical register.
2403unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2404                                                   unsigned PhysReg) const {
2405  unsigned NumConflicts = 0;
2406  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2407  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2408         E = mri_->reg_end(); I != E; ++I) {
2409    MachineOperand &O = I.getOperand();
2410    MachineInstr *MI = O.getParent();
2411    unsigned Index = getInstructionIndex(MI);
2412    if (pli.liveAt(Index))
2413      ++NumConflicts;
2414  }
2415  return NumConflicts;
2416}
2417
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative (largest allocatable) super-register so that
  // all of PhysReg's aliases are covered by a single emergency spill.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  // A single instruction may reference li.reg through several operands;
  // process each instruction only once.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      // SpillReg is live across this def/use of li.reg: record an emergency
      // spill and carve the instruction's slot range out of its interval.
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The range spans multiple live ranges of pli, which we cannot
        // split around a single spill/restore pair; this is unrecoverable.
        cerr << "Ran out of registers during register allocation!\n";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          cerr << "Please check your inline asm statement for invalid "
               << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }
      // Also remove the same slot range from every sub-register interval,
      // since spilling SpillReg frees its sub-registers as well.
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}
2470
2471LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2472                                                  MachineInstr* startInst) {
2473  LiveInterval& Interval = getOrCreateInterval(reg);
2474  VNInfo* VN = Interval.getNextValue(
2475            getInstructionIndex(startInst) + InstrSlots::DEF,
2476            startInst, true, getVNInfoAllocator());
2477  VN->setHasPHIKill(true);
2478  VN->kills.push_back(
2479    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2480  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2481               getMBBEndIdx(startInst->getParent()) + 1, VN);
2482  Interval.addRange(LR);
2483
2484  return LR;
2485}
2486