LiveIntervalAnalysis.cpp revision 0a4c09e724bd3ab7c9a1d3a1615894e7bf209179
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));

static cl::opt<int> CoalescingLimit("early-coalescing-limit",
                                    cl::init(-1), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
STATISTIC(numCoalescing, "Number of early coalescing performed");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();
  phiJoinCopies.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   unsigned OpIdx, const TargetInstrInfo *tii_){
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return true;
  if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return true;
  return false;
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
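///
/// For illustration (a sketch; the register numbers are hypothetical), a
/// full copy that reads an implicit_def'd register is itself rewritten
/// into an implicit_def:
///
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024
/// becomes
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = IMPLICIT_DEF
///
/// Uses that cannot be rewritten this way are marked isUndef instead.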
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
          for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
            ImpDefRegs.insert(*SS);
        }
        ImpDefMIs.push_back(MI);
        continue;
      }

      if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
        MachineOperand &MO = MI->getOperand(2);
        if (ImpDefRegs.count(MO.getReg())) {
          // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
          // This is an identity copy, eliminate it now.
          if (MO.isKill()) {
            LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
            vi.removeKill(MI);
          }
          MI->eraseFromParent();
          continue;
        }
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill) {
            ImpDefRegs.erase(Reg);
            LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
            vi.removeKill(MI);
          }
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure any other uses of the same register in this
          // instruction are marked undef as well.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers, since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def's we want to keep are those that are live
      // out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}


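// A sketch of the numbering computeNumbering produces (the exact sub-slot
// encoding lives in LiveIndex): each block starts with one empty slot, every
// instruction is followed by max(1, numdefs) empty slots, and one extra gap
// is reserved in front of the block's first terminator (or at the block end
// if there is none) for PHI kills:
//
//   [empty] inst0 [gap] inst1 [gap] ... [terminator gap] term [gap]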
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();
  phiJoinCopies.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(LiveIndex(),LiveIndex()));

  LiveIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    LiveIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex = getNextIndex(MIIndex);
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        // PHI kills.
        LiveIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex = getNextIndex(MIIndex);
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex = getNextIndex(MIIndex);
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      while (Slots--) {
        MIIndex = getNextIndex(MIIndex);
        i2miMap_.push_back(0);
      }

    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      // PHI kills.
      LiveIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex = getNextIndex(MIIndex);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        LiveIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = LiveIndex(mi2iMap_[OldI2MI[index]],
                                (LiveIndex::Slot)offset);
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (getPrevSlot(LI->end)).getVecIndex();
        offset  = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getNextSlot(getMBBEndIdx(I->second));
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end =
              LiveIndex(mi2iMap_[OldI2MI[index]],
                (idx == index ? offset : LiveIndex::LOAD));
          else
            LI->end =
              LiveIndex(LiveIndex::NUM * i2miMap_.size());
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          LiveIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
          LiveIndex::Slot offset = vni->kills[i].getSlot();

          if (vni->kills[i].isLoad()) {
            assert(false && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIIndex()) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
              --I;
              vni->kills[i] = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i] = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<LiveIndex, LiveIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, LiveIndex>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  LiveIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    LiveIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.clear();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
  }
}


/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();
  performEarlyCoalescing();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (LiveIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = getNextIndex(index);
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef, except that
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (LiveIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = getNextIndex(index);
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    LiveIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
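    // e.g. (a sketch): a def with a single same-block kill produces the
    // range [defSlot(def), useSlot(kill)+1).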
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      LiveIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
      else if (MO.isEarlyClobber())
        // Earlyclobbers that die in this instruction move up one extra, to
        // compensate for having the starting point moved back one.  This
        // gets them to overlap the live range of other outputs.
        killIdx = getNextSlot(getNextSlot(defIndex));
      else
        killIdx = getNextSlot(defIndex);

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getNextSlot(getMBBEndIdx(*I)),  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      LiveIndex killIdx =
        getNextSlot(getUseIndex(getInstructionIndex(Kill)));
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
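      // e.g. (a sketch; the vreg numbers are hypothetical):
      //   %reg1024 = MOV %reg1025      ; after the split this defines val#1
      //   %reg1024 = ADD %reg1024, 1   ; two-address redef now defines val#0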
      assert(interval.containsOneValue());
      LiveIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      LiveIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // Flags are copied below.
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // <- Copied here.

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(
          LiveRange(RedefIndex, MO.isEarlyClobber() ?
                                getNextSlot(getNextSlot(RedefIndex)) :
                                getNextSlot(RedefIndex), OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        phiJoinCopies.push_back(Killer);
        LiveIndex Start = getMBBStartIdx(Killer->getParent());
        LiveIndex End =
          getNextSlot(getUseIndex(getInstructionIndex(Killer)));
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(terminatorGaps[killMBB]);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(LiveIndex(mbb->getNumber()),
                                0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      LiveIndex defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      LiveIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(terminatorGaps[mbb]);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  LiveIndex baseIndex = MIIdx;
  LiveIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  LiveIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
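  // e.g. (a sketch): a normal dead def gets [defSlot(def), defSlot(def)+1);
  // an earlyclobber dead def started at useSlot(def), so it gets two slots,
  // [useSlot(def), useSlot(def)+2), to cover the same point.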
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    if (MO.isEarlyClobber())
      end = getNextSlot(getNextSlot(start));
    else
      end = getNextSlot(start);
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = getNextIndex(baseIndex);
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = getNextIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = getNextSlot(start);
        }
        goto exit;
      }
    }

    baseIndex = getNextIndex(baseIndex);
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it is dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = getNextSlot(start);

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      LiveIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it is killed, it
  // shouldn't be considered live-in.
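  // e.g. (a sketch): a live-in killed at useSlot(kill) gets the range
  // [block start, useSlot(kill)+1); a live-in redefined before any use is
  // dead, so its range ends right after the def slot instead.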
  MachineBasicBlock::iterator mi = MBB->begin();
  LiveIndex baseIndex = MIIdx;
  LiveIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = getNextIndex(baseIndex);
  LiveIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(start));
      SeenDefUse = true;
      break;
    }

    baseIndex = getNextIndex(baseIndex);
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = getNextIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(MIIdx));
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(LiveIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

bool
LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
                                      SmallVector<MachineInstr*,16> &IdentCopies,
                                      SmallVector<MachineInstr*,16> &OtherCopies) {
  bool HaveConflict = false;
  unsigned NumIdent = 0;
  for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
         re = mri_->def_end(); ri != re; ++ri) {
    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      return false;
    if (SrcReg != DstInt.reg) {
      OtherCopies.push_back(MI);
      HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
    } else {
      IdentCopies.push_back(MI);
      ++NumIdent;
    }
  }

  if (!HaveConflict)
    return false; // Let the coalescer handle it.
  return IdentCopies.size() > OtherCopies.size();
}

void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
    return;

  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
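  ///
  /// Sketch of the pattern being eliminated (vreg numbers are hypothetical):
  ///   bb1:    %reg1025 = MOV %reg1024   ; would-be identity copy
  ///   joinbb: %reg1024 = MOV %reg1025   ; the phi join copy
  /// Merging %reg1025 into %reg1024 makes both copies redundant.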
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
      break;

    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove = tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
#ifndef NDEBUG
    assert(isMove && "PHI join instruction must be a move!");
#else
    isMove = isMove; // Avoid compiler warning in release builds.
#endif

    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
      continue;

    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    VNInfo *VNI = DstInt.getValNumInfo(0);

    // Change the non-identity copies to directly target the phi destination.
    for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = OtherCopies[i];
      DEBUG(errs() << "Moving: " << *PHICopy);

      LiveIndex MIIndex = getInstructionIndex(PHICopy);
      LiveIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      LiveIndex StartIndex = SLR->start;
      LiveIndex EndIndex = SLR->end;

      // Delete the val# defined by the now-identity copy and add the range
      // from the beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      if (DstInt.liveAt(StartIndex))
        DstInt.removeRange(StartIndex, EndIndex);
      VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
                                           VNInfoAllocator);
      NewVNI->setHasPHIKill(true);
      DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = PHICopy->getOperand(j);
        if (!MO.isReg() || MO.getReg() != PHISrc)
          continue;
        MO.setReg(PHIDst);
      }
    }

    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);

      LiveIndex MIIndex = getInstructionIndex(PHICopy);
      LiveIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      LiveIndex StartIndex = SLR->start;
      LiveIndex EndIndex = SLR->end;

      // Delete the val# defined by the now-identity copy and add the range
      // from the beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
    }

    // Remove the phi join and update the phi block liveness.
    LiveIndex MIIndex = getInstructionIndex(Join);
    LiveIndex UseIndex = getUseIndex(MIIndex);
    LiveIndex DefIndex = getDefIndex(MIIndex);
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    DLR->valno->setCopy(0);
    DLR->valno->setIsDefAccurate(false);
    DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
    SrcInt.removeRange(SLR->start, SLR->end);
    assert(SrcInt.empty());
    removeInterval(PHISrc);
    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();

    ++numCoalescing;
  }
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
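///
/// For example (a sketch under this numbering): a vreg defined by
/// instruction i and last used by instruction j in the same block gets the
/// interval [defSlot(i), useSlot(j)+1).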
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    LiveIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = getNextIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;

      while (Slots--)
        MIIndex = getNextIndex(MIIndex);

      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = getNextIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

bool LiveIntervals::findLiveInMBBs(
                              LiveIndex Start, LiveIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(
                              LiveIndex Start, LiveIndex End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  // Physical registers get an effectively infinite spill weight; they can
  // never be spilled.
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
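/// For example (a sketch; the vregs are hypothetical): for a copy recorded
/// as
///   %reg1026 = INSERT_SUBREG %reg1026, %reg1027, 1
/// the returned source register is operand 2, i.e. %reg1027.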
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
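///
/// For example (a sketch): a constant-pool load that reads a PIC base vreg
/// is still rematerializable; this returns that PIC base register so its
/// availability can be checked at each rematerialization point.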
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // With assertions disabled we can stop at the first register operand;
    // with them enabled, keep scanning so the assert above can catch a
    // second one.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       LiveIndex UseIdx) const {
  LiveIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

1400/// isReMaterializable - Returns true if the definition MI of the specified
1401/// val# of the specified interval is re-materializable.
1402bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1403                                       const VNInfo *ValNo, MachineInstr *MI,
1404                                       SmallVectorImpl<LiveInterval*> &SpillIs,
1405                                       bool &isLoad) {
1406  if (DisableReMat)
1407    return false;
1408
1409  if (!tii_->isTriviallyReMaterializable(MI, aa_))
1410    return false;
1411
1412  // Target-specific code can mark an instruction as being rematerializable
1413  // if it has one virtual reg use, though it had better be something like
1414  // a PIC base register which is likely to be live everywhere.
1415  unsigned ImpUse = getReMatImplicitUse(li, MI);
1416  if (ImpUse) {
1417    const LiveInterval &ImpLi = getInterval(ImpUse);
1418    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1419           re = mri_->use_end(); ri != re; ++ri) {
1420      MachineInstr *UseMI = &*ri;
1421      LiveIndex UseIdx = getInstructionIndex(UseMI);
1422      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1423        continue;
1424      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1425        return false;
1426    }
1427
1428    // If a register operand of the re-materialized instruction is going to
1429    // be spilled next, then it's not legal to re-materialize this instruction.
1430    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1431      if (ImpUse == SpillIs[i]->reg)
1432        return false;
1433  }
1434  return true;
1435}
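
// Minimal caller sketch (assumed usage, not from the original source):
//   SmallVector<LiveInterval*, 4> SpillIs; // intervals about to be spilled
//   bool isLoad = false;
//   if (isReMaterializable(li, VNI, DefMI, SpillIs, isLoad))
//     // DefMI may be recomputed at each use instead of being reloaded
//     // from a stack slot.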
1436
1437/// isReMaterializable - Returns true if the definition MI of the specified
1438/// val# of the specified interval is re-materializable.
1439bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1440                                       const VNInfo *ValNo, MachineInstr *MI) {
1441  SmallVector<LiveInterval*, 4> Dummy1;
1442  bool Dummy2;
1443  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1444}
1445
1446/// isReMaterializable - Returns true if the definition MI of every val# of
1447/// the specified interval is re-materializable.
1448bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1449                                       SmallVectorImpl<LiveInterval*> &SpillIs,
1450                                       bool &isLoad) {
1451  isLoad = false;
1452  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1453       i != e; ++i) {
1454    const VNInfo *VNI = *i;
1455    if (VNI->isUnused())
1456      continue; // Dead val#.
1457    // Is the def for the val# rematerializable?
1458    if (!VNI->isDefAccurate())
1459      return false;
1460    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1461    bool DefIsLoad = false;
1462    if (!ReMatDefMI ||
1463        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1464      return false;
1465    isLoad |= DefIsLoad;
1466  }
1467  return true;
1468}
1469
1470/// FilterFoldedOps - Filter out two-address use operands. Return
1471/// true if it finds any issue with the operands that ought to prevent
1472/// folding.
1473static bool FilterFoldedOps(MachineInstr *MI,
1474                            SmallVector<unsigned, 2> &Ops,
1475                            unsigned &MRInfo,
1476                            SmallVector<unsigned, 2> &FoldOps) {
1477  MRInfo = 0;
1478  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1479    unsigned OpIdx = Ops[i];
1480    MachineOperand &MO = MI->getOperand(OpIdx);
1481    // FIXME: fold subreg use.
1482    if (MO.getSubReg())
1483      return true;
1484    if (MO.isDef())
1485      MRInfo |= (unsigned)VirtRegMap::isMod;
1486    else {
1487      // Filter out two-address use operand(s).
1488      if (MI->isRegTiedToDefOperand(OpIdx)) {
1489        MRInfo = VirtRegMap::isModRef;
1490        continue;
1491      }
1492      MRInfo |= (unsigned)VirtRegMap::isRef;
1493    }
1494    FoldOps.push_back(OpIdx);
1495  }
1496  return false;
1497}
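
// Illustrative example (hypothetical two-address x86 instruction, not from
// the original source): given
//   %reg1024<def> = ADD32ri %reg1024<tied>, 42
// with Ops = {0, 1}, the tied use (operand 1) is filtered out, only operand 0
// lands in FoldOps, and MRInfo ends up as isModRef.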
1498
1499
1500/// tryFoldMemoryOperand - Attempts to fold either a spill / restore (from /
1501/// to a stack slot) or a rematerialized load into the specified operands of
1502/// MI. If it is successful, MI is updated with the newly created MI and true
1503/// is returned.
1504bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1505                                         VirtRegMap &vrm, MachineInstr *DefMI,
1506                                         LiveIndex InstrIdx,
1507                                         SmallVector<unsigned, 2> &Ops,
1508                                         bool isSS, int Slot, unsigned Reg) {
1509  // If it is an implicit def instruction, just delete it.
1510  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1511    RemoveMachineInstrFromMaps(MI);
1512    vrm.RemoveMachineInstrFromMaps(MI);
1513    MI->eraseFromParent();
1514    ++numFolds;
1515    return true;
1516  }
1517
1518  // Filter the list of operand indexes that are to be folded. Abort if
1519  // any operand will prevent folding.
1520  unsigned MRInfo = 0;
1521  SmallVector<unsigned, 2> FoldOps;
1522  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1523    return false;
1524
1525  // The only time it's safe to fold into a two-address instruction is when
1526  // it's folding a reload and a spill from / into a spill stack slot.
1527  if (DefMI && (MRInfo & VirtRegMap::isMod))
1528    return false;
1529
1530  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1531                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1532  if (fmi) {
1533    // Remember this instruction uses the spill slot.
1534    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1535
1536    // Attempt to fold the memory reference into the instruction. If
1537    // we can do this, we don't need to insert spill code.
1538    MachineBasicBlock &MBB = *MI->getParent();
1539    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1540      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1541    vrm.transferSpillPts(MI, fmi);
1542    vrm.transferRestorePts(MI, fmi);
1543    vrm.transferEmergencySpills(MI, fmi);
1544    mi2iMap_.erase(MI);
1545    i2miMap_[InstrIdx.getVecIndex()] = fmi;
1546    mi2iMap_[fmi] = InstrIdx;
1547    MI = MBB.insert(MBB.erase(MI), fmi);
1548    ++numFolds;
1549    return true;
1550  }
1551  return false;
1552}
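
// Illustrative fold (hypothetical x86 instructions, not from the original
// source): rather than reloading before the use,
//   %reg1025 = MOV32rm <fi#2>, ...          ; restore from stack slot
//   %reg1026 = ADD32rr %reg1026, %reg1025
// the restore is folded to produce
//   %reg1026 = ADD32rm %reg1026, <fi#2>, ...
// so no separate reload instruction is needed.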
1553
1554/// canFoldMemoryOperand - Returns true if the specified load / store
1555/// folding is possible.
1556bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1557                                         SmallVector<unsigned, 2> &Ops,
1558                                         bool ReMat) const {
1559  // Filter the list of operand indexes that are to be folded. Abort if
1560  // any operand will prevent folding.
1561  unsigned MRInfo = 0;
1562  SmallVector<unsigned, 2> FoldOps;
1563  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1564    return false;
1565
1566  // It's only legal to remat for a use, not a def.
1567  if (ReMat && (MRInfo & VirtRegMap::isMod))
1568    return false;
1569
1570  return tii_->canFoldMemoryOperand(MI, FoldOps);
1571}
1572
1573bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1574  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1575  for (LiveInterval::Ranges::const_iterator
1576         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1577    std::vector<IdxMBBPair>::const_iterator II =
1578      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1579    if (II == Idx2MBBMap.end())
1580      continue;
1581    if (I->end > II->first)  // crossing an MBB.
1582      return false;
1583    MBBs.insert(II->second);
1584    if (MBBs.size() > 1)
1585      return false;
1586  }
1587  return true;
1588}
1589
1590/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
1591/// interval by to-be-rematerialized operands of MI) with the new register.
1592void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1593                                       MachineInstr *MI, unsigned NewVReg,
1594                                       VirtRegMap &vrm) {
1595  // There is an implicit use. That means one of the other operands is
1596  // being remat'ed and the remat'ed instruction has li.reg as a
1597  // use operand. Make sure we rewrite that as well.
1598  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1599    MachineOperand &MO = MI->getOperand(i);
1600    if (!MO.isReg())
1601      continue;
1602    unsigned Reg = MO.getReg();
1603    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1604      continue;
1605    if (!vrm.isReMaterialized(Reg))
1606      continue;
1607    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1608    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1609    if (UseMO)
1610      UseMO->setReg(NewVReg);
1611  }
1612}
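
// Illustrative scenario (hypothetical vregs, not from the original source):
// MI uses %reg1030, %reg1030 is marked re-materialized, and the cached remat
// instruction for it reads li.reg (say, a PIC base). Renaming li.reg to
// NewVReg in MI must therefore also patch the cached remat instruction, or
// it would later be re-emitted with a stale register.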
1613
1614/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1615/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1616bool LiveIntervals::
1617rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1618                 bool TrySplit, LiveIndex index, LiveIndex end,
1619                 MachineInstr *MI,
1620                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1621                 unsigned Slot, int LdSlot,
1622                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1623                 VirtRegMap &vrm,
1624                 const TargetRegisterClass* rc,
1625                 SmallVector<int, 4> &ReMatIds,
1626                 const MachineLoopInfo *loopInfo,
1627                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1628                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1629                 std::vector<LiveInterval*> &NewLIs) {
1630  bool CanFold = false;
1631 RestartInstruction:
1632  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1633    MachineOperand& mop = MI->getOperand(i);
1634    if (!mop.isReg())
1635      continue;
1636    unsigned Reg = mop.getReg();
1637    unsigned RegI = Reg;
1638    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1639      continue;
1640    if (Reg != li.reg)
1641      continue;
1642
1643    bool TryFold = !DefIsReMat;
1644    bool FoldSS = true; // Default behavior unless it's a remat.
1645    int FoldSlot = Slot;
1646    if (DefIsReMat) {
1647      // If this is the rematerializable definition MI itself and
1648      // all of its uses are rematerialized, simply delete it.
1649      if (MI == ReMatOrigDefMI && CanDelete) {
1650        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1651                     << MI << '\n');
1652        RemoveMachineInstrFromMaps(MI);
1653        vrm.RemoveMachineInstrFromMaps(MI);
1654        MI->eraseFromParent();
1655        break;
1656      }
1657
1658      // If def for this use can't be rematerialized, then try folding.
1659      // If def is rematerializable and it's a load, also try folding.
1660      TryFold = !ReMatDefMI || MI == ReMatOrigDefMI || isLoad;
1661      if (isLoad) {
1662        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1663        FoldSS = isLoadSS;
1664        FoldSlot = LdSlot;
1665      }
1666    }
1667
1668    // Scan all of the operands of this instruction rewriting operands
1669    // to use NewVReg instead of li.reg as appropriate.  We do this for
1670    // two reasons:
1671    //
1672    //   1. If the instr reads the same spilled vreg multiple times, we
1673    //      want to reuse the NewVReg.
1674    //   2. If the instr is a two-addr instruction, we are required to
1675    //      keep the src/dst regs pinned.
1676    //
1677    // Keep track of whether we replace a use and/or def so that we can
1678    // create the spill interval with the appropriate range.
1679
1680    HasUse = mop.isUse();
1681    HasDef = mop.isDef();
1682    SmallVector<unsigned, 2> Ops;
1683    Ops.push_back(i);
1684    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1685      const MachineOperand &MOj = MI->getOperand(j);
1686      if (!MOj.isReg())
1687        continue;
1688      unsigned RegJ = MOj.getReg();
1689      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1690        continue;
1691      if (RegJ == RegI) {
1692        Ops.push_back(j);
1693        if (!MOj.isUndef()) {
1694          HasUse |= MOj.isUse();
1695          HasDef |= MOj.isDef();
1696        }
1697      }
1698    }
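    // For example (hypothetical instruction, not from the original source),
    //   %reg1024<def> = ADD32rr %reg1024<tied>, %reg1024
    // touches li.reg through three operands; Ops collects all their indices
    // so a single NewVReg replaces them together, preserving the two-address
    // tie between the def and its tied use.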
1699
1700    // Create a new virtual register for the spill interval.
1701    // Create the new register now so we can map the fold instruction
1702    // to the new register, so that when it is unfolded we get the
1703    // correct answer.
1704    bool CreatedNewVReg = false;
1705    if (NewVReg == 0) {
1706      NewVReg = mri_->createVirtualRegister(rc);
1707      vrm.grow();
1708      CreatedNewVReg = true;
1709    }
1710
1711    if (!TryFold)
1712      CanFold = false;
1713    else {
1714      // Do not fold load / store here if we are splitting. We'll find an
1715      // optimal point to insert a load / store later.
1716      if (!TrySplit) {
1717        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1718                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1719          // Folding the load/store can completely change the instruction in
1720          // unpredictable ways, rescan it from the beginning.
1721
1722          if (FoldSS) {
1723            // We need to give the new vreg the same stack slot as the
1724            // spilled interval.
1725            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1726          }
1727
1728          HasUse = false;
1729          HasDef = false;
1730          CanFold = false;
1731          if (isNotInMIMap(MI))
1732            break;
1733          goto RestartInstruction;
1734        }
1735      } else {
1736        // We'll try to fold it later if it's profitable.
1737        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1738      }
1739    }
1740
1741    mop.setReg(NewVReg);
1742    if (mop.isImplicit())
1743      rewriteImplicitOps(li, MI, NewVReg, vrm);
1744
1745    // Reuse NewVReg for other reads.
1746    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1747      MachineOperand &mopj = MI->getOperand(Ops[j]);
1748      mopj.setReg(NewVReg);
1749      if (mopj.isImplicit())
1750        rewriteImplicitOps(li, MI, NewVReg, vrm);
1751    }
1752
1753    if (CreatedNewVReg) {
1754      if (DefIsReMat) {
1755        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1756        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1757          // Each valnum may have its own remat id.
1758          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1759        } else {
1760          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1761        }
1762        if (!CanDelete || (HasUse && HasDef)) {
1763          // If this is a two-addr instruction then its use operands are
1764          // rematerializable but its def is not. It should be assigned a
1765          // stack slot.
1766          vrm.assignVirt2StackSlot(NewVReg, Slot);
1767        }
1768      } else {
1769        vrm.assignVirt2StackSlot(NewVReg, Slot);
1770      }
1771    } else if (HasUse && HasDef &&
1772               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1773      // If this interval hasn't been assigned a stack slot (because the
1774      // earlier def is a deleted remat def), do it now.
1775      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1776      vrm.assignVirt2StackSlot(NewVReg, Slot);
1777    }
1778
1779    // Re-matting an instruction with virtual register use. Add the
1780    // register as an implicit use on the use MI.
1781    if (DefIsReMat && ImpUse)
1782      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1783
1784    // Create a new register interval for this spill / remat.
1785    LiveInterval &nI = getOrCreateInterval(NewVReg);
1786    if (CreatedNewVReg) {
1787      NewLIs.push_back(&nI);
1788      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1789      if (TrySplit)
1790        vrm.setIsSplitFromReg(NewVReg, li.reg);
1791    }
1792
1793    if (HasUse) {
1794      if (CreatedNewVReg) {
1795        LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1796                     nI.getNextValue(LiveIndex(), 0, false,
1797                                     VNInfoAllocator));
1798        DEBUG(errs() << " +" << LR);
1799        nI.addRange(LR);
1800      } else {
1801        // Extend the split live interval to this def / use.
1802        LiveIndex End = getNextSlot(getUseIndex(index));
1803        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1804                     nI.getValNumInfo(nI.getNumValNums()-1));
1805        DEBUG(errs() << " +" << LR);
1806        nI.addRange(LR);
1807      }
1808    }
1809    if (HasDef) {
1810      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1811                   nI.getNextValue(LiveIndex(), 0, false,
1812                                   VNInfoAllocator));
1813      DEBUG(errs() << " +" << LR);
1814      nI.addRange(LR);
1815    }
1816
1817    DEBUG({
1818        errs() << "\t\t\t\tAdded new interval: ";
1819        nI.print(errs(), tri_);
1820        errs() << '\n';
1821      });
1822  }
1823  return CanFold;
1824}
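
/// anyKillInMBBAfterIdx - Return true if the given val# has a kill inside
/// MBB after the specified index. Kills at PHI indexes are ignored.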
1825bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1826                                   const VNInfo *VNI,
1827                                   MachineBasicBlock *MBB,
1828                                   LiveIndex Idx) const {
1829  LiveIndex End = getMBBEndIdx(MBB);
1830  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1831    if (VNI->kills[j].isPHIIndex())
1832      continue;
1833
1834    LiveIndex KillIdx = VNI->kills[j];
1835    if (KillIdx > Idx && KillIdx < End)
1836      return true;
1837  }
1838  return false;
1839}
1840
1841/// RewriteInfo - Keep track of machine instrs that will be rewritten
1842/// during spilling.
1843namespace {
1844  struct RewriteInfo {
1845    LiveIndex Index;
1846    MachineInstr *MI;
1847    bool HasUse;
1848    bool HasDef;
1849    RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
1850      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1851  };
1852
1853  struct RewriteInfoCompare {
1854    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1855      return LHS.Index < RHS.Index;
1856    }
1857  };
1858}
1859
1860void LiveIntervals::
1861rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1862                    LiveInterval::Ranges::const_iterator &I,
1863                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1864                    unsigned Slot, int LdSlot,
1865                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1866                    VirtRegMap &vrm,
1867                    const TargetRegisterClass* rc,
1868                    SmallVector<int, 4> &ReMatIds,
1869                    const MachineLoopInfo *loopInfo,
1870                    BitVector &SpillMBBs,
1871                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1872                    BitVector &RestoreMBBs,
1873                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1874                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1875                    std::vector<LiveInterval*> &NewLIs) {
1876  bool AllCanFold = true;
1877  unsigned NewVReg = 0;
1878  LiveIndex start = getBaseIndex(I->start);
1879  LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1880
1881  // First collect all the defs / uses in this live range that will be
1882  // rewritten. Make sure they are sorted according to instruction index.
1883  std::vector<RewriteInfo> RewriteMIs;
1884  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1885         re = mri_->reg_end(); ri != re; ) {
1886    MachineInstr *MI = &*ri;
1887    MachineOperand &O = ri.getOperand();
1888    ++ri;
1889    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1890    LiveIndex index = getInstructionIndex(MI);
1891    if (index < start || index >= end)
1892      continue;
1893
1894    if (O.isUndef())
1895      // Must be defined by an implicit def. It should not be spilled. Note,
1896      // this is for correctness reasons. e.g.
1897      // 8   %reg1024<def> = IMPLICIT_DEF
1898      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1899      // The live range [12, 14) is not part of the r1024 live interval since
1900      // it's defined by an implicit def. It will not conflict with the live
1901      // interval of r1025. Now suppose both registers are spilled; you can
1902      // easily see a situation where both registers are reloaded before
1903      // the INSERT_SUBREG and the two target registers would overlap.
1904      continue;
1905    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1906  }
1907  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1908
1909  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1910  // Now rewrite the defs and uses.
1911  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1912    RewriteInfo &rwi = RewriteMIs[i];
1913    ++i;
1914    LiveIndex index = rwi.Index;
1915    bool MIHasUse = rwi.HasUse;
1916    bool MIHasDef = rwi.HasDef;
1917    MachineInstr *MI = rwi.MI;
1918    // If MI defs and/or uses the same register multiple times, then there
1919    // are multiple entries.
1920    unsigned NumUses = MIHasUse;
1921    while (i != e && RewriteMIs[i].MI == MI) {
1922      assert(RewriteMIs[i].Index == index);
1923      bool isUse = RewriteMIs[i].HasUse;
1924      if (isUse) ++NumUses;
1925      MIHasUse |= isUse;
1926      MIHasDef |= RewriteMIs[i].HasDef;
1927      ++i;
1928    }
1929    MachineBasicBlock *MBB = MI->getParent();
1930
1931    if (ImpUse && MI != ReMatDefMI) {
1932      // Re-matting an instruction with virtual register use. Update the
1933      // register interval's spill weight to HUGE_VALF to prevent it from
1934      // being spilled.
1935      LiveInterval &ImpLi = getInterval(ImpUse);
1936      ImpLi.weight = HUGE_VALF;
1937    }
1938
1939    unsigned MBBId = MBB->getNumber();
1940    unsigned ThisVReg = 0;
1941    if (TrySplit) {
1942      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1943      if (NVI != MBBVRegsMap.end()) {
1944        ThisVReg = NVI->second;
1945        // One common case:
1946        // x = use
1947        // ...
1948        // ...
1949        // def = ...
1950        //     = use
1951        // It's better to start a new interval to avoid artificially
1952        // extending the current one.
1953        if (MIHasDef && !MIHasUse) {
1954          MBBVRegsMap.erase(MBB->getNumber());
1955          ThisVReg = 0;
1956        }
1957      }
1958    }
1959
1960    bool IsNew = ThisVReg == 0;
1961    if (IsNew) {
1962      // This ends the previous live interval. If all of its defs / uses
1963      // can be folded, give it a low spill weight.
1964      if (NewVReg && TrySplit && AllCanFold) {
1965        LiveInterval &nI = getOrCreateInterval(NewVReg);
1966        nI.weight /= 10.0F;
1967      }
1968      AllCanFold = true;
1969    }
1970    NewVReg = ThisVReg;
1971
1972    bool HasDef = false;
1973    bool HasUse = false;
1974    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1975                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1976                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1977                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1978                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1979    if (!HasDef && !HasUse)
1980      continue;
1981
1982    AllCanFold &= CanFold;
1983
1984    // Update weight of spill interval.
1985    LiveInterval &nI = getOrCreateInterval(NewVReg);
1986    if (!TrySplit) {
1987      // The spill weight is now infinity as it cannot be spilled again.
1988      nI.weight = HUGE_VALF;
1989      continue;
1990    }
1991
1992    // Keep track of the last def and first use in each MBB.
1993    if (HasDef) {
1994      if (MI != ReMatOrigDefMI || !CanDelete) {
1995        bool HasKill = false;
1996        if (!HasUse)
1997          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1998        else {
1999          // If this is two-address code, then this index starts a new VNInfo.
2000          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2001          if (VNI)
2002            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2003        }
2004        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2005          SpillIdxes.find(MBBId);
2006        if (!HasKill) {
2007          if (SII == SpillIdxes.end()) {
2008            std::vector<SRInfo> S;
2009            S.push_back(SRInfo(index, NewVReg, true));
2010            SpillIdxes.insert(std::make_pair(MBBId, S));
2011          } else if (SII->second.back().vreg != NewVReg) {
2012            SII->second.push_back(SRInfo(index, NewVReg, true));
2013          } else if (index > SII->second.back().index) {
2014            // If there is an earlier def and this is a two-address
2015            // instruction, then it's not possible to fold the store (which
2016            // would also fold the load).
2017            SRInfo &Info = SII->second.back();
2018            Info.index = index;
2019            Info.canFold = !HasUse;
2020          }
2021          SpillMBBs.set(MBBId);
2022        } else if (SII != SpillIdxes.end() &&
2023                   SII->second.back().vreg == NewVReg &&
2024                   index > SII->second.back().index) {
2025          // There is an earlier def that's not killed (must be two-address).
2026          // The spill is no longer needed.
2027          SII->second.pop_back();
2028          if (SII->second.empty()) {
2029            SpillIdxes.erase(MBBId);
2030            SpillMBBs.reset(MBBId);
2031          }
2032        }
2033      }
2034    }
2035
2036    if (HasUse) {
2037      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2038        SpillIdxes.find(MBBId);
2039      if (SII != SpillIdxes.end() &&
2040          SII->second.back().vreg == NewVReg &&
2041          index > SII->second.back().index)
2042        // There are use(s) after the last def; it's not safe to fold the spill.
2043        SII->second.back().canFold = false;
2044      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2045        RestoreIdxes.find(MBBId);
2046      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2047        // If we are splitting live intervals, only fold if it's the first
2048        // use and there isn't another use later in the MBB.
2049        RII->second.back().canFold = false;
2050      else if (IsNew) {
2051        // Only need a reload if there isn't an earlier def / use.
2052        if (RII == RestoreIdxes.end()) {
2053          std::vector<SRInfo> Infos;
2054          Infos.push_back(SRInfo(index, NewVReg, true));
2055          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2056        } else {
2057          RII->second.push_back(SRInfo(index, NewVReg, true));
2058        }
2059        RestoreMBBs.set(MBBId);
2060      }
2061    }
2062
2063    // Update spill weight.
2064    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2065    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
2066  }
2067
2068  if (NewVReg && TrySplit && AllCanFold) {
2069    // If all of its defs / uses can be folded, give it a low spill weight.
2070    LiveInterval &nI = getOrCreateInterval(NewVReg);
2071    nI.weight /= 10.0F;
2072  }
2073}
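
// Bookkeeping sketch (illustrative indexes, not from the original source):
// for a basic block containing
//   12  %reg1024<def> = ...        ; last def, not killed later in the block
//   20  ...           = %reg1024   ; first use with no earlier def / use
// SpillIdxes[MBB] ends up holding index 12 (a store goes after the def) and
// RestoreIdxes[MBB] index 20 (a reload goes before the use); each entry's
// canFold flag records whether the store / load may instead be folded into
// the instruction itself.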
2074
2075bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
2076                        unsigned vr, BitVector &RestoreMBBs,
2077                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2078  if (!RestoreMBBs[Id])
2079    return false;
2080  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2081  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2082    if (Restores[i].index == index &&
2083        Restores[i].vreg == vr &&
2084        Restores[i].canFold)
2085      return true;
2086  return false;
2087}
2088
2089void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
2090                        unsigned vr, BitVector &RestoreMBBs,
2091                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2092  if (!RestoreMBBs[Id])
2093    return;
2094  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2095  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2096    if (Restores[i].index == index && Restores[i].vreg == vr)
2097      Restores[i].index = LiveIndex();
2098}
2099
2100/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2101/// spilled and create empty intervals for their uses.
2102void
2103LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2104                                    const TargetRegisterClass* rc,
2105                                    std::vector<LiveInterval*> &NewLIs) {
2106  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2107         re = mri_->reg_end(); ri != re; ) {
2108    MachineOperand &O = ri.getOperand();
2109    MachineInstr *MI = &*ri;
2110    ++ri;
2111    if (O.isDef()) {
2112      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2113             "Register def was not rewritten?");
2114      RemoveMachineInstrFromMaps(MI);
2115      vrm.RemoveMachineInstrFromMaps(MI);
2116      MI->eraseFromParent();
2117    } else {
2118      // This must be a use of an implicit_def so it's not part of the live
2119      // interval. Create a new empty live interval for it.
2120      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2121      unsigned NewVReg = mri_->createVirtualRegister(rc);
2122      vrm.grow();
2123      vrm.setIsImplicitlyDefined(NewVReg);
2124      NewLIs.push_back(&getOrCreateInterval(NewVReg));
2125      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2126        MachineOperand &MO = MI->getOperand(i);
2127        if (MO.isReg() && MO.getReg() == li.reg) {
2128          MO.setReg(NewVReg);
2129          MO.setIsUndef();
2130        }
2131      }
2132    }
2133  }
2134}
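
// Illustrative case (hypothetical vreg, not from the original source): if
// %reg1024 is defined only by
//   %reg1024<def> = IMPLICIT_DEF
// the def is erased outright, and each surviving use is rewritten to a
// fresh vreg marked undef with an empty interval; no store or reload is
// ever issued for an undefined value.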
2135
2136std::vector<LiveInterval*> LiveIntervals::
2137addIntervalsForSpillsFast(const LiveInterval &li,
2138                          const MachineLoopInfo *loopInfo,
2139                          VirtRegMap &vrm) {
2140  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2141
2142  std::vector<LiveInterval*> added;
2143
2144  assert(li.weight != HUGE_VALF &&
2145         "attempt to spill already spilled interval!");
2146
2147  DEBUG({
2148      errs() << "\t\t\t\tadding intervals for spills for interval: ";
2149      li.dump();
2150      errs() << '\n';
2151    });
2152
2153  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2154
2155  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2156  while (RI != mri_->reg_end()) {
2157    MachineInstr* MI = &*RI;
2158
2159    SmallVector<unsigned, 2> Indices;
2160    bool HasUse = false;
2161    bool HasDef = false;
2162
2163    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2164      MachineOperand& mop = MI->getOperand(i);
2165      if (!mop.isReg() || mop.getReg() != li.reg) continue;
2166
2167      HasUse |= MI->getOperand(i).isUse();
2168      HasDef |= MI->getOperand(i).isDef();
2169
2170      Indices.push_back(i);
2171    }
2172
2173    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2174                              Indices, true, slot, li.reg)) {
2175      unsigned NewVReg = mri_->createVirtualRegister(rc);
2176      vrm.grow();
2177      vrm.assignVirt2StackSlot(NewVReg, slot);
2178
2179      // create a new register for this spill
2180      LiveInterval &nI = getOrCreateInterval(NewVReg);
2181
2182      // the spill weight is now infinity as it
2183      // cannot be spilled again
2184      nI.weight = HUGE_VALF;
2185
2186      // Rewrite register operands to use the new vreg.
2187      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2188           E = Indices.end(); I != E; ++I) {
2189        MI->getOperand(*I).setReg(NewVReg);
2190
2191        if (MI->getOperand(*I).isUse())
2192          MI->getOperand(*I).setIsKill(true);
2193      }
2194
2195      // Fill in the new live interval.
2196      LiveIndex index = getInstructionIndex(MI);
2197      if (HasUse) {
2198        LiveRange LR(getLoadIndex(index), getUseIndex(index),
2199                     nI.getNextValue(LiveIndex(), 0, false,
2200                                     getVNInfoAllocator()));
2201        DEBUG(errs() << " +" << LR);
2202        nI.addRange(LR);
2203        vrm.addRestorePoint(NewVReg, MI);
2204      }
2205      if (HasDef) {
2206        LiveRange LR(getDefIndex(index), getStoreIndex(index),
2207                     nI.getNextValue(LiveIndex(), 0, false,
2208                                     getVNInfoAllocator()));
2209        DEBUG(errs() << " +" << LR);
2210        nI.addRange(LR);
2211        vrm.addSpillPoint(NewVReg, true, MI);
2212      }
2213
2214      added.push_back(&nI);
2215
2216      DEBUG({
2217          errs() << "\t\t\t\tadded new interval: ";
2218          nI.dump();
2219          errs() << '\n';
2220        });
2221    }
2222
2223
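    // Whether the operands were folded away or rewritten to NewVReg, MI no
    // longer references li.reg, so the use list shrinks and the loop above
    // terminates.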
2224    RI = mri_->reg_begin(li.reg);
2225  }
2226
2227  return added;
2228}
2229
2230std::vector<LiveInterval*> LiveIntervals::
2231addIntervalsForSpills(const LiveInterval &li,
2232                      SmallVectorImpl<LiveInterval*> &SpillIs,
2233                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2234
2235  if (EnableFastSpilling)
2236    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2237
2238  assert(li.weight != HUGE_VALF &&
2239         "attempt to spill already spilled interval!");
2240
2241  DEBUG({
2242      errs() << "\t\t\t\tadding intervals for spills for interval: ";
2243      li.print(errs(), tri_);
2244      errs() << '\n';
2245    });
2246
2247  // Each bit specifies whether a spill is required in the MBB.
2248  BitVector SpillMBBs(mf_->getNumBlockIDs());
2249  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2250  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2251  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2252  DenseMap<unsigned,unsigned> MBBVRegsMap;
2253  std::vector<LiveInterval*> NewLIs;
2254  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2255
2256  unsigned NumValNums = li.getNumValNums();
2257  SmallVector<MachineInstr*, 4> ReMatDefs;
2258  ReMatDefs.resize(NumValNums, NULL);
2259  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2260  ReMatOrigDefs.resize(NumValNums, NULL);
2261  SmallVector<int, 4> ReMatIds;
2262  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2263  BitVector ReMatDelete(NumValNums);
2264  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2265
2266  // Spilling a split live interval. It cannot be split any further, and
2267  // it's guaranteed to be a single val# / range interval.
2268  if (vrm.getPreSplitReg(li.reg)) {
2269    vrm.setIsSplitFromReg(li.reg, 0);
2270    // Unset the split kill marker on the last use.
2271    LiveIndex KillIdx = vrm.getKillPoint(li.reg);
2272    if (KillIdx != LiveIndex()) {
2273      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2274      assert(KillMI && "Last use disappeared?");
2275      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2276      assert(KillOp != -1 && "Last use disappeared?");
2277      KillMI->getOperand(KillOp).setIsKill(false);
2278    }
2279    vrm.removeKillPoint(li.reg);
2280    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2281    Slot = vrm.getStackSlot(li.reg);
2282    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2283    MachineInstr *ReMatDefMI = DefIsReMat ?
2284      vrm.getReMaterializedMI(li.reg) : NULL;
2285    int LdSlot = 0;
2286    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2287    bool isLoad = isLoadSS ||
2288      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2289    bool IsFirstRange = true;
2290    for (LiveInterval::Ranges::const_iterator
2291           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2292      // If this is a split live interval with multiple ranges, it means there
2293      // are two-address instructions that re-defined the value. Only the
2294      // first def can be rematerialized!
2295      if (IsFirstRange) {
2296        // Note ReMatOrigDefMI has already been deleted.
2297        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2298                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2299                             false, vrm, rc, ReMatIds, loopInfo,
2300                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2301                             MBBVRegsMap, NewLIs);
2302      } else {
2303        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2304                             Slot, 0, false, false, false,
2305                             false, vrm, rc, ReMatIds, loopInfo,
2306                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2307                             MBBVRegsMap, NewLIs);
2308      }
2309      IsFirstRange = false;
2310    }
2311
2312    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2313    return NewLIs;
2314  }
2315
2316  bool TrySplit = !intervalIsInOneMBB(li);
2317  if (TrySplit)
2318    ++numSplits;
2319  bool NeedStackSlot = false;
2320  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2321       i != e; ++i) {
2322    const VNInfo *VNI = *i;
2323    unsigned VN = VNI->id;
2324    if (VNI->isUnused())
2325      continue; // Dead val#.
2326    // Is the def for the val# rematerializable?
2327    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2328      ? getInstructionFromIndex(VNI->def) : 0;
2329    bool dummy;
2330    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2331      // Remember how to remat the def of this val#.
2332      ReMatOrigDefs[VN] = ReMatDefMI;
2333      // Original def may be modified so we have to make a copy here.
2334      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2335      CloneMIs.push_back(Clone);
2336      ReMatDefs[VN] = Clone;
2337
2338      bool CanDelete = true;
2339      if (VNI->hasPHIKill()) {
2340        // A kill is a phi node, so not all of its uses can be
2341        // rematerialized. It must not be deleted.
2342        CanDelete = false;
2343        // Need a stack slot if there is any live range where uses cannot be
2344        // rematerialized.
2345        NeedStackSlot = true;
2346      }
2347      if (CanDelete)
2348        ReMatDelete.set(VN);
2349    } else {
2350      // Need a stack slot if there is any live range where uses cannot be
2351      // rematerialized.
2352      NeedStackSlot = true;
2353    }
2354  }
2355
2356  // One stack slot per live interval.
2357  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2358    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2359      Slot = vrm.assignVirt2StackSlot(li.reg);
2360
2361    // This case only occurs when the prealloc splitter has already assigned
2362    // a stack slot to this vreg.
2363    else
2364      Slot = vrm.getStackSlot(li.reg);
2365  }
2366
2367  // Create new intervals and rewrite defs and uses.
2368  for (LiveInterval::Ranges::const_iterator
2369         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2370    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2371    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2372    bool DefIsReMat = ReMatDefMI != NULL;
2373    bool CanDelete = ReMatDelete[I->valno->id];
2374    int LdSlot = 0;
2375    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2376    bool isLoad = isLoadSS ||
2377      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2378    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2379                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2380                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2381                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2382                               MBBVRegsMap, NewLIs);
2383  }
2384
2385  // Insert spills / restores if we are splitting.
2386  if (!TrySplit) {
2387    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2388    return NewLIs;
2389  }
2390
2391  SmallPtrSet<LiveInterval*, 4> AddedKill;
2392  SmallVector<unsigned, 2> Ops;
2393  if (NeedStackSlot) {
2394    int Id = SpillMBBs.find_first();
2395    while (Id != -1) {
2396      std::vector<SRInfo> &spills = SpillIdxes[Id];
2397      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2398        LiveIndex index = spills[i].index;
2399        unsigned VReg = spills[i].vreg;
2400        LiveInterval &nI = getOrCreateInterval(VReg);
2401        bool isReMat = vrm.isReMaterialized(VReg);
2402        MachineInstr *MI = getInstructionFromIndex(index);
2403        bool CanFold = false;
2404        bool FoundUse = false;
2405        Ops.clear();
2406        if (spills[i].canFold) {
2407          CanFold = true;
2408          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2409            MachineOperand &MO = MI->getOperand(j);
2410            if (!MO.isReg() || MO.getReg() != VReg)
2411              continue;
2412
2413            Ops.push_back(j);
2414            if (MO.isDef())
2415              continue;
2416            if (isReMat ||
2417                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2418                                                RestoreMBBs, RestoreIdxes))) {
2419              // MI has two-address uses of the same register. If the use
2420              // isn't the first and only use in the BB, then we can't fold
2421              // it. FIXME: Move this to rewriteInstructionsForSpills.
2422              CanFold = false;
2423              break;
2424            }
2425            FoundUse = true;
2426          }
2427        }
2428        // Fold the store into the def if possible.
2429        bool Folded = false;
2430        if (CanFold && !Ops.empty()) {
2431          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2432            Folded = true;
2433            if (FoundUse) {
2434              // Also folded uses, do not issue a load.
2435              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2436              nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2437            }
2438            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2439          }
2440        }
2441
2442        // Otherwise tell the spiller to issue a spill.
2443        if (!Folded) {
2444          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2445          bool isKill = LR->end == getStoreIndex(index);
2446          if (!MI->registerDefIsDead(nI.reg))
2447            // No need to spill a dead def.
2448            vrm.addSpillPoint(VReg, isKill, MI);
2449          if (isKill)
2450            AddedKill.insert(&nI);
2451        }
2452      }
2453      Id = SpillMBBs.find_next(Id);
2454    }
2455  }
2456
2457  int Id = RestoreMBBs.find_first();
2458  while (Id != -1) {
2459    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2460    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2461      LiveIndex index = restores[i].index;
2462      if (index == LiveIndex())
2463        continue;
2464      unsigned VReg = restores[i].vreg;
2465      LiveInterval &nI = getOrCreateInterval(VReg);
2466      bool isReMat = vrm.isReMaterialized(VReg);
2467      MachineInstr *MI = getInstructionFromIndex(index);
2468      bool CanFold = false;
2469      Ops.clear();
2470      if (restores[i].canFold) {
2471        CanFold = true;
2472        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2473          MachineOperand &MO = MI->getOperand(j);
2474          if (!MO.isReg() || MO.getReg() != VReg)
2475            continue;
2476
2477          if (MO.isDef()) {
2478            // If this restore were to be folded, it would have been folded
2479            // already.
2480            CanFold = false;
2481            break;
2482          }
2483          Ops.push_back(j);
2484        }
2485      }
2486
2487      // Fold the load into the use if possible.
2488      bool Folded = false;
2489      if (CanFold && !Ops.empty()) {
2490        if (!isReMat)
2491          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2492        else {
2493          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2494          int LdSlot = 0;
2495          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2496          // If the rematerializable def is a load, also try to fold it.
2497          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2498            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2499                                          Ops, isLoadSS, LdSlot, VReg);
2500          if (!Folded) {
2501            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2502            if (ImpUse) {
2503              // Re-matting an instruction with virtual register use. Add the
2504              // register as an implicit use on the use MI and update the register
2505              // interval's spill weight to HUGE_VALF to prevent it from being
2506              // spilled.
2507              LiveInterval &ImpLi = getInterval(ImpUse);
2508              ImpLi.weight = HUGE_VALF;
2509              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2510            }
2511          }
2512        }
2513      }
2514      // If folding is not possible / failed, then tell the spiller to issue a
2515      // load / rematerialization for us.
2516      if (Folded)
2517        nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2518      else
2519        vrm.addRestorePoint(VReg, MI);
2520    }
2521    Id = RestoreMBBs.find_next(Id);
2522  }
2523
2524  // Finalize intervals: add kills, finalize spill weights, and filter out
2525  // dead intervals.
2526  std::vector<LiveInterval*> RetNewLIs;
2527  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2528    LiveInterval *LI = NewLIs[i];
2529    if (!LI->empty()) {
2530      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2531      if (!AddedKill.count(LI)) {
2532        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2533        LiveIndex LastUseIdx = getBaseIndex(LR->end);
2534        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2535        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2536        assert(UseIdx != -1);
2537        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2538          LastUse->getOperand(UseIdx).setIsKill();
2539          vrm.addKillPoint(LI->reg, LastUseIdx);
2540        }
2541      }
2542      RetNewLIs.push_back(LI);
2543    }
2544  }
2545
2546  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2547  return RetNewLIs;
2548}
2549
2550/// hasAllocatableSuperReg - Return true if the specified physical register has
2551/// any super register that's allocatable.
2552bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2553  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2554    if (allocatableRegs_[*AS] && hasInterval(*AS))
2555      return true;
2556  return false;
2557}
2558
2559/// getRepresentativeReg - Find the largest super register of the specified
2560/// physical register.
2561unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2562  // Find the largest super-register that is allocatable.
2563  unsigned BestReg = Reg;
2564  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2565    unsigned SuperReg = *AS;
2566    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2567      BestReg = SuperReg;
2568      break;
2569    }
2570  }
2571  return BestReg;
2572}
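
// Illustrative example (x86, assuming EAX currently has a live interval):
// getRepresentativeReg(AL) walks AL's super-registers and would settle on
// EAX, the largest super-register with no allocatable super-register of its
// own.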
2573
2574/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2575/// specified interval that conflicts with the specified physical register.
2576unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2577                                                   unsigned PhysReg) const {
2578  unsigned NumConflicts = 0;
2579  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2580  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2581         E = mri_->reg_end(); I != E; ++I) {
2582    MachineOperand &O = I.getOperand();
2583    MachineInstr *MI = O.getParent();
2584    LiveIndex Index = getInstructionIndex(MI);
2585    if (pli.liveAt(Index))
2586      ++NumConflicts;
2587  }
2588  return NumConflicts;
2589}
2590
2591/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2592/// around all defs and uses of the specified interval. Return true if it
2593/// was able to cut the physical register's interval.
2594bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2595                                            unsigned PhysReg, VirtRegMap &vrm) {
2596  unsigned SpillReg = getRepresentativeReg(PhysReg);
2597
2598  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2599    // If there is a register that aliases PhysReg but is not a sub-register
2600    // of the chosen representative super register, assert, since we can't
2601    // handle that case yet.
2602    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2603           tri_->isSuperRegister(*AS, SpillReg));
2604
2605  bool Cut = false;
2606  LiveInterval &pli = getInterval(SpillReg);
2607  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2608  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2609         E = mri_->reg_end(); I != E; ++I) {
2610    MachineOperand &O = I.getOperand();
2611    MachineInstr *MI = O.getParent();
2612    if (SeenMIs.count(MI))
2613      continue;
2614    SeenMIs.insert(MI);
2615    LiveIndex Index = getInstructionIndex(MI);
2616    if (pli.liveAt(Index)) {
2617      vrm.addEmergencySpill(SpillReg, MI);
2618      LiveIndex StartIdx = getLoadIndex(Index);
2619      LiveIndex EndIdx = getNextSlot(getStoreIndex(Index));
2620      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2621        pli.removeRange(StartIdx, EndIdx);
2622        Cut = true;
2623      } else {
2624        std::string msg;
2625        raw_string_ostream Msg(msg);
2626        Msg << "Ran out of registers during register allocation!";
2627        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2628          Msg << "\nPlease check your inline asm statement for invalid "
2629               << "constraints:\n";
2630          MI->print(Msg, tm_);
2631        }
2632        llvm_report_error(Msg.str());
2633      }
2634      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2635        if (!hasInterval(*AS))
2636          continue;
2637        LiveInterval &spli = getInterval(*AS);
2638        if (spli.liveAt(Index))
2639          spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2640      }
2641    }
2642  }
2643  return Cut;
2644}
2645
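/// addLiveRangeToEndOfBlock - Extend reg's live interval with a new range
/// from startInst's def slot to the end of startInst's basic block, giving
/// the new value a PHI kill in the terminator gap.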
2646LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2647                                                  MachineInstr* startInst) {
2648  LiveInterval& Interval = getOrCreateInterval(reg);
2649  VNInfo* VN = Interval.getNextValue(
2650    LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2651    startInst, true, getVNInfoAllocator());
2652  VN->setHasPHIKill(true);
2653  VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2654  LiveRange LR(
2655    LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
2656    getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2657  Interval.addRange(LR);
2658
2659  return LR;
2660}
2661
2662