LiveIntervalAnalysis.cpp revision d129d73b725e32d2fc0c87f392c239db8801e922
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
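
// Illustrative usage note (not in the original source): being cl::Hidden,
// these flags are omitted from -help output but are still accepted on the
// command line, e.g. (assuming the standard llc driver):
//   llc -disable-rematerialization foo.bc  # turn off rematerialization
//   llc -split-limit=10 foo.bc             # cap the number of interval splits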

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add the isUndef marker to
/// implicit_def defs and their uses.
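///
/// Illustrative example (not from the source): given
///   %reg1024 = IMPLICIT_DEF
///   %reg1025 = MOV %reg1024        ; a copy of an implicit_def'd value
///   ...      = ADD %reg1026, %reg1024
/// the copy is itself rewritten into '%reg1025 = IMPLICIT_DEF', and the
/// remaining use of %reg1024 is marked isUndef.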
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // The use is a copy; just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which
        // define physical registers, since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_defs we want to keep are those that are live
      // out of their blocks.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

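/// computeNumbering - Number the machine instructions and basic blocks of
/// the current function.
///
/// Explanatory note (an assumption based on the InstrSlots enum declared in
/// LiveIntervalAnalysis.h at this revision): every instruction is allotted
/// InstrSlots::NUM == 4 consecutive indices, one per slot kind:
///   getBaseIndex(i)  == i - (i % 4)                         // instr start
///   getLoadIndex(i)  == getBaseIndex(i) + InstrSlots::LOAD  // +0
///   getUseIndex(i)   == getBaseIndex(i) + InstrSlots::USE   // +1
///   getDefIndex(i)   == getBaseIndex(i) + InstrSlots::DEF   // +2
///   getStoreIndex(i) == getBaseIndex(i) + InstrSlots::STORE // +3
/// So a value defined at base index B lives in [B+2, useSlot(kill)+1),
/// matching the [defSlot(def), useSlot(kill)+1) intervals described below.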
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings
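  //
  // Explanatory note (an assumption about intent, not stated in the source):
  // multiplying every index by 'factor' widens the unused gaps between
  // instruction slots, so that later passes can assign indices to newly
  // inserted instructions without renumbering the whole function.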

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = MI->first;
  }
}


/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
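  //
  // Illustrative example (not from the source): after phi elimination the
  // same vreg can be defined in several blocks, e.g.
  //   BB0: %reg1024 = ...      BB1: %reg1024 = ...
  // Only the first def we encounter builds the interval from empty; any
  // later def is handled by the non-empty branch below.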
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
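      //
      // Illustrative sketch (not from the source): for a tied-operand pair
      //   A: %reg1024 = MOV %reg1023       ; the input copy, value #0
      //   B: %reg1024 = ADD %reg1024, 1    ; the two-address redef
      // the range starting at A is split so that a new value #1 covers
      // [defSlot(A), defSlot(B)) and value #0 is now defined at B.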
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills. If the register reaches a def before it's killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
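///
/// For example (illustrative, not from the source): for
///   %reg1025 = INSERT_SUBREG %reg1024, %reg1023, 1
/// the copied value is operand 2, so this returns %reg1023.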
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and this returns true.
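///
/// Illustrative example (not from the source; assumes an x86 target): a
/// reload of %reg1024 followed by 'ADD32rr %reg1025, %reg1024' can be
/// folded into a single 'ADD32rm' that reads the stack slot directly, so
/// no separate load needs to be inserted.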
1358bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1359                                         VirtRegMap &vrm, MachineInstr *DefMI,
1360                                         unsigned InstrIdx,
1361                                         SmallVector<unsigned, 2> &Ops,
1362                                         bool isSS, int Slot, unsigned Reg) {
1363  // If it is an implicit def instruction, just delete it.
1364  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1365    RemoveMachineInstrFromMaps(MI);
1366    vrm.RemoveMachineInstrFromMaps(MI);
1367    MI->eraseFromParent();
1368    ++numFolds;
1369    return true;
1370  }
1371
1372  // Filter the list of operand indexes that are to be folded. Abort if
1373  // any operand will prevent folding.
1374  unsigned MRInfo = 0;
1375  SmallVector<unsigned, 2> FoldOps;
1376  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1377    return false;
1378
1379  // The only time it's safe to fold into a two address instruction is when
1380  // it's folding reload and spill from / into a spill stack slot.
1381  if (DefMI && (MRInfo & VirtRegMap::isMod))
1382    return false;
1383
1384  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1385                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1386  if (fmi) {
1387    // Remember this instruction uses the spill slot.
1388    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1389
1390    // Attempt to fold the memory reference into the instruction. If
1391    // we can do this, we don't need to insert spill code.
1392    MachineBasicBlock &MBB = *MI->getParent();
1393    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1394      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1395    vrm.transferSpillPts(MI, fmi);
1396    vrm.transferRestorePts(MI, fmi);
1397    vrm.transferEmergencySpills(MI, fmi);
1398    mi2iMap_.erase(MI);
1399    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1400    mi2iMap_[fmi] = InstrIdx;
1401    MI = MBB.insert(MBB.erase(MI), fmi);
1402    ++numFolds;
1403    return true;
1404  }
1405  return false;
1406}
1407
1408/// canFoldMemoryOperand - Returns true if the specified load / store
1409/// folding is possible.
1410bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1411                                         SmallVector<unsigned, 2> &Ops,
1412                                         bool ReMat) const {
1413  // Filter the list of operand indexes that are to be folded. Abort if
1414  // any operand will prevent folding.
1415  unsigned MRInfo = 0;
1416  SmallVector<unsigned, 2> FoldOps;
1417  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1418    return false;
1419
1420  // It's only legal to remat for a use, not a def.
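  // Rematerialization re-computes a value just before a use; a def (or a
  // tied two-address use) would require storing the result back, which
  // remat cannot express.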
1421  if (ReMat && (MRInfo & VirtRegMap::isMod))
1422    return false;
1423
1424  return tii_->canFoldMemoryOperand(MI, FoldOps);
1425}
1426
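/// intervalIsInOneMBB - Return true if all of the live ranges of the
/// specified interval are contained within a single basic block.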
1427bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1428  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1429  for (LiveInterval::Ranges::const_iterator
1430         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1431    std::vector<IdxMBBPair>::const_iterator II =
1432      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1433    if (II == Idx2MBBMap.end())
1434      continue;
1435    if (I->end > II->first)  // crossing a MBB.
1436      return false;
1437    MBBs.insert(II->second);
1438    if (MBBs.size() > 1)
1439      return false;
1440  }
1441  return true;
1442}
1443
1444/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
1445/// interval in to-be rematerialized operands of MI) with the new register.
1446void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1447                                       MachineInstr *MI, unsigned NewVReg,
1448                                       VirtRegMap &vrm) {
1449  // There is an implicit use. That means one of the other operands is
1450  // being remat'ed and the remat'ed instruction has li.reg as a
1451  // use operand. Make sure we rewrite that as well.
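  // For example (hypothetical vregs), if the remat'ed def of %reg1025 reads
  // li.reg (here %reg1024):
  //   %reg1025<def> = LEA32r %reg1024, ...     ; recorded remat instruction
  //   ...           = use %reg1025, %reg1024<imp-use>
  // then the recorded remat instruction's %reg1024 operand must also be
  // rewritten to NewVReg.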
1452  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1453    MachineOperand &MO = MI->getOperand(i);
1454    if (!MO.isReg())
1455      continue;
1456    unsigned Reg = MO.getReg();
1457    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1458      continue;
1459    if (!vrm.isReMaterialized(Reg))
1460      continue;
1461    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1462    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1463    if (UseMO)
1464      UseMO->setReg(NewVReg);
1465  }
1466}
1467
1468/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1469/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1470bool LiveIntervals::
1471rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1472                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1473                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1474                 unsigned Slot, int LdSlot,
1475                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1476                 VirtRegMap &vrm,
1477                 const TargetRegisterClass* rc,
1478                 SmallVector<int, 4> &ReMatIds,
1479                 const MachineLoopInfo *loopInfo,
1480                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1481                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1482                 std::vector<LiveInterval*> &NewLIs) {
1483  bool CanFold = false;
1484 RestartInstruction:
1485  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1486    MachineOperand& mop = MI->getOperand(i);
1487    if (!mop.isReg())
1488      continue;
1489    unsigned Reg = mop.getReg();
1490    unsigned RegI = Reg;
1491    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1492      continue;
1493    if (Reg != li.reg)
1494      continue;
1495
1496    bool TryFold = !DefIsReMat;
1497    bool FoldSS = true; // Default behavior unless it's a remat.
1498    int FoldSlot = Slot;
1499    if (DefIsReMat) {
1500      // If this is the rematerializable definition MI itself and
1501      // all of its uses are rematerialized, simply delete it.
1502      if (MI == ReMatOrigDefMI && CanDelete) {
1503        DOUT << "\t\t\t\tErasing re-materializable def: ";
1504        DOUT << MI << '\n';
1505        RemoveMachineInstrFromMaps(MI);
1506        vrm.RemoveMachineInstrFromMaps(MI);
1507        MI->eraseFromParent();
1508        break;
1509      }
1510
1511      // If def for this use can't be rematerialized, then try folding.
1512      // If def is rematerializable and it's a load, also try folding.
1513      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1514      if (isLoad) {
1515        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1516        FoldSS = isLoadSS;
1517        FoldSlot = LdSlot;
1518      }
1519    }
1520
1521    // Scan all of the operands of this instruction rewriting operands
1522    // to use NewVReg instead of li.reg as appropriate.  We do this for
1523    // two reasons:
1524    //
1525    //   1. If the instr reads the same spilled vreg multiple times, we
1526    //      want to reuse the NewVReg.
1527    //   2. If the instr is a two-addr instruction, we are required to
1528    //      keep the src/dst regs pinned.
1529    //
1530    // Keep track of whether we replace a use and/or def so that we can
1531    // create the spill interval with the appropriate range.
1532
1533    HasUse = mop.isUse();
1534    HasDef = mop.isDef();
1535    SmallVector<unsigned, 2> Ops;
1536    Ops.push_back(i);
1537    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1538      const MachineOperand &MOj = MI->getOperand(j);
1539      if (!MOj.isReg())
1540        continue;
1541      unsigned RegJ = MOj.getReg();
1542      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1543        continue;
1544      if (RegJ == RegI) {
1545        Ops.push_back(j);
1546        if (!MOj.isUndef()) {
1547          HasUse |= MOj.isUse();
1548          HasDef |= MOj.isDef();
1549        }
1550      }
1551    }
1552
1553    // Create a new virtual register for the spill interval.
1554    // Create the new register now so we can map the fold instruction
1555    // to the new register, so that when it is unfolded we get the correct
1556    // answer.
1557    bool CreatedNewVReg = false;
1558    if (NewVReg == 0) {
1559      NewVReg = mri_->createVirtualRegister(rc);
1560      vrm.grow();
1561      CreatedNewVReg = true;
1562    }
1563
1564    if (!TryFold)
1565      CanFold = false;
1566    else {
1567      // Do not fold load / store here if we are splitting. We'll find an
1568      // optimal point to insert a load / store later.
1569      if (!TrySplit) {
1570        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1571                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1572          // Folding the load/store can completely change the instruction in
1573          // unpredictable ways; rescan it from the beginning.
1574
1575          if (FoldSS) {
1576            // We need to give the new vreg the same stack slot as the
1577            // spilled interval.
1578            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1579          }
1580
1581          HasUse = false;
1582          HasDef = false;
1583          CanFold = false;
1584          if (isNotInMIMap(MI))
1585            break;
1586          goto RestartInstruction;
1587        }
1588      } else {
1589        // We'll try to fold it later if it's profitable.
1590        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1591      }
1592    }
1593
1594    mop.setReg(NewVReg);
1595    if (mop.isImplicit())
1596      rewriteImplicitOps(li, MI, NewVReg, vrm);
1597
1598    // Reuse NewVReg for other reads.
1599    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1600      MachineOperand &mopj = MI->getOperand(Ops[j]);
1601      mopj.setReg(NewVReg);
1602      if (mopj.isImplicit())
1603        rewriteImplicitOps(li, MI, NewVReg, vrm);
1604    }
1605
1606    if (CreatedNewVReg) {
1607      if (DefIsReMat) {
1608        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1609        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1610          // Each valnum may have its own remat id.
1611          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1612        } else {
1613          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1614        }
1615        if (!CanDelete || (HasUse && HasDef)) {
1616          // If this is a two-addr instruction then its use operands are
1617          // rematerializable but its def is not. It should be assigned a
1618          // stack slot.
1619          vrm.assignVirt2StackSlot(NewVReg, Slot);
1620        }
1621      } else {
1622        vrm.assignVirt2StackSlot(NewVReg, Slot);
1623      }
1624    } else if (HasUse && HasDef &&
1625               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1626      // If this interval hasn't been assigned a stack slot (because an
1627      // earlier def is a deleted remat def), do it now.
1628      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1629      vrm.assignVirt2StackSlot(NewVReg, Slot);
1630    }
1631
1632    // Re-matting an instruction with virtual register use. Add the
1633    // register as an implicit use on the use MI.
1634    if (DefIsReMat && ImpUse)
1635      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1636
1637    // Create a new register interval for this spill / remat.
1638    LiveInterval &nI = getOrCreateInterval(NewVReg);
1639    if (CreatedNewVReg) {
1640      NewLIs.push_back(&nI);
1641      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1642      if (TrySplit)
1643        vrm.setIsSplitFromReg(NewVReg, li.reg);
1644    }
1645
1646    if (HasUse) {
1647      if (CreatedNewVReg) {
1648        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1649                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1650        DOUT << " +" << LR;
1651        nI.addRange(LR);
1652      } else {
1653        // Extend the split live interval to this def / use.
1654        unsigned End = getUseIndex(index)+1;
1655        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1656                     nI.getValNumInfo(nI.getNumValNums()-1));
1657        DOUT << " +" << LR;
1658        nI.addRange(LR);
1659      }
1660    }
1661    if (HasDef) {
1662      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1663                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1664      DOUT << " +" << LR;
1665      nI.addRange(LR);
1666    }
1667
1668    DOUT << "\t\t\t\tAdded new interval: ";
1669    nI.print(DOUT, tri_);
1670    DOUT << '\n';
1671  }
1672  return CanFold;
1673}
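
/// anyKillInMBBAfterIdx - Return true if the given val# of the specified
/// interval has a non-PHI kill in MBB after the given index.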
1674bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1675                                   const VNInfo *VNI,
1676                                   MachineBasicBlock *MBB, unsigned Idx) const {
1677  unsigned End = getMBBEndIdx(MBB);
1678  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1679    if (VNI->kills[j].isPHIKill)
1680      continue;
1681
1682    unsigned KillIdx = VNI->kills[j].killIdx;
1683    if (KillIdx > Idx && KillIdx < End)
1684      return true;
1685  }
1686  return false;
1687}
1688
1689/// RewriteInfo - Keep track of machine instrs that will be rewritten
1690/// during spilling.
1691namespace {
1692  struct RewriteInfo {
1693    unsigned Index;
1694    MachineInstr *MI;
1695    bool HasUse;
1696    bool HasDef;
1697    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1698      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1699  };
1700
1701  struct RewriteInfoCompare {
1702    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1703      return LHS.Index < RHS.Index;
1704    }
1705  };
1706}
1707
1708void LiveIntervals::
1709rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1710                    LiveInterval::Ranges::const_iterator &I,
1711                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1712                    unsigned Slot, int LdSlot,
1713                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1714                    VirtRegMap &vrm,
1715                    const TargetRegisterClass* rc,
1716                    SmallVector<int, 4> &ReMatIds,
1717                    const MachineLoopInfo *loopInfo,
1718                    BitVector &SpillMBBs,
1719                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1720                    BitVector &RestoreMBBs,
1721                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1722                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1723                    std::vector<LiveInterval*> &NewLIs) {
1724  bool AllCanFold = true;
1725  unsigned NewVReg = 0;
1726  unsigned start = getBaseIndex(I->start);
1727  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1728
1729  // First collect all the def / use in this live range that will be rewritten.
1730  // Make sure they are sorted according to instruction index.
1731  std::vector<RewriteInfo> RewriteMIs;
1732  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1733         re = mri_->reg_end(); ri != re; ) {
1734    MachineInstr *MI = &*ri;
1735    MachineOperand &O = ri.getOperand();
1736    ++ri;
1737    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1738    unsigned index = getInstructionIndex(MI);
1739    if (index < start || index >= end)
1740      continue;
1741
1742    if (O.isUndef())
1743      // Must be defined by an implicit def. It should not be spilled. Note,
1744      // this is for correctness reasons. e.g.
1745      // 8   %reg1024<def> = IMPLICIT_DEF
1746      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1747      // The live range [12, 14) is not part of the r1024 live interval since
1748      // it's defined by an implicit def. It will not conflict with the live
1749      // interval of r1025. Now suppose both registers are spilled; you can
1750      // easily see a situation where both registers are reloaded before
1751      // the INSERT_SUBREG and the two target registers would overlap.
1752      continue;
1753    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1754  }
1755  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1756
1757  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1758  // Now rewrite the defs and uses.
1759  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1760    RewriteInfo &rwi = RewriteMIs[i];
1761    ++i;
1762    unsigned index = rwi.Index;
1763    bool MIHasUse = rwi.HasUse;
1764    bool MIHasDef = rwi.HasDef;
1765    MachineInstr *MI = rwi.MI;
1766    // If MI defs and/or uses the same register multiple times, then there
1767    // are multiple entries.
1768    unsigned NumUses = MIHasUse;
1769    while (i != e && RewriteMIs[i].MI == MI) {
1770      assert(RewriteMIs[i].Index == index);
1771      bool isUse = RewriteMIs[i].HasUse;
1772      if (isUse) ++NumUses;
1773      MIHasUse |= isUse;
1774      MIHasDef |= RewriteMIs[i].HasDef;
1775      ++i;
1776    }
1777    MachineBasicBlock *MBB = MI->getParent();
1778
1779    if (ImpUse && MI != ReMatDefMI) {
1780      // Re-matting an instruction with virtual register use. Update the
1781      // register interval's spill weight to HUGE_VALF to prevent it from
1782      // being spilled.
1783      LiveInterval &ImpLi = getInterval(ImpUse);
1784      ImpLi.weight = HUGE_VALF;
1785    }
1786
1787    unsigned MBBId = MBB->getNumber();
1788    unsigned ThisVReg = 0;
1789    if (TrySplit) {
1790      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1791      if (NVI != MBBVRegsMap.end()) {
1792        ThisVReg = NVI->second;
1793        // One common case:
1794        // x = use
1795        // ...
1796        // ...
1797        // def = ...
1798        //     = use
1799        // It's better to start a new interval to avoid artificially
1800        // extending the current one.
1801        if (MIHasDef && !MIHasUse) {
1802          MBBVRegsMap.erase(MBB->getNumber());
1803          ThisVReg = 0;
1804        }
1805      }
1806    }
1807
1808    bool IsNew = ThisVReg == 0;
1809    if (IsNew) {
1810      // This ends the previous live interval. If all of its def / use
1811      // can be folded, give it a low spill weight.
1812      if (NewVReg && TrySplit && AllCanFold) {
1813        LiveInterval &nI = getOrCreateInterval(NewVReg);
1814        nI.weight /= 10.0F;
1815      }
1816      AllCanFold = true;
1817    }
1818    NewVReg = ThisVReg;
1819
1820    bool HasDef = false;
1821    bool HasUse = false;
1822    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1823                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1824                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1825                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1826                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1827    if (!HasDef && !HasUse)
1828      continue;
1829
1830    AllCanFold &= CanFold;
1831
1832    // Update weight of spill interval.
1833    LiveInterval &nI = getOrCreateInterval(NewVReg);
1834    if (!TrySplit) {
1835      // The spill weight is now infinity as it cannot be spilled again.
1836      nI.weight = HUGE_VALF;
1837      continue;
1838    }
1839
1840    // Keep track of the last def and first use in each MBB.
1841    if (HasDef) {
1842      if (MI != ReMatOrigDefMI || !CanDelete) {
1843        bool HasKill = false;
1844        if (!HasUse)
1845          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1846        else {
1847          // If this is two-address code, then this index starts a new VNInfo.
1848          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1849          if (VNI)
1850            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1851        }
1852        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1853          SpillIdxes.find(MBBId);
1854        if (!HasKill) {
1855          if (SII == SpillIdxes.end()) {
1856            std::vector<SRInfo> S;
1857            S.push_back(SRInfo(index, NewVReg, true));
1858            SpillIdxes.insert(std::make_pair(MBBId, S));
1859          } else if (SII->second.back().vreg != NewVReg) {
1860            SII->second.push_back(SRInfo(index, NewVReg, true));
1861          } else if ((int)index > SII->second.back().index) {
1862            // If there is an earlier def and this is a two-address
1863            // instruction, then it's not possible to fold the store (which
1864            // would also fold the load).
1865            SRInfo &Info = SII->second.back();
1866            Info.index = index;
1867            Info.canFold = !HasUse;
1868          }
1869          SpillMBBs.set(MBBId);
1870        } else if (SII != SpillIdxes.end() &&
1871                   SII->second.back().vreg == NewVReg &&
1872                   (int)index > SII->second.back().index) {
1873          // There is an earlier def that's not killed (must be two-address).
1874          // The spill is no longer needed.
1875          SII->second.pop_back();
1876          if (SII->second.empty()) {
1877            SpillIdxes.erase(MBBId);
1878            SpillMBBs.reset(MBBId);
1879          }
1880        }
1881      }
1882    }
1883
1884    if (HasUse) {
1885      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1886        SpillIdxes.find(MBBId);
1887      if (SII != SpillIdxes.end() &&
1888          SII->second.back().vreg == NewVReg &&
1889          (int)index > SII->second.back().index)
1890        // Use(s) follow the last def, so it's not safe to fold the spill.
1891        SII->second.back().canFold = false;
1892      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1893        RestoreIdxes.find(MBBId);
1894      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1895        // If we are splitting live intervals, only fold if it's the first
1896        // use and there isn't another use later in the MBB.
1897        RII->second.back().canFold = false;
1898      else if (IsNew) {
1899        // Only need a reload if there isn't an earlier def / use.
1900        if (RII == RestoreIdxes.end()) {
1901          std::vector<SRInfo> Infos;
1902          Infos.push_back(SRInfo(index, NewVReg, true));
1903          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1904        } else {
1905          RII->second.push_back(SRInfo(index, NewVReg, true));
1906        }
1907        RestoreMBBs.set(MBBId);
1908      }
1909    }
1910
1911    // Update spill weight.
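    // getSpillWeight weighs the def / use more heavily at greater loop depth.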
1912    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1913    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1914  }
1915
1916  if (NewVReg && TrySplit && AllCanFold) {
1917    // If all of its def / use can be folded, give it a low spill weight.
1918    LiveInterval &nI = getOrCreateInterval(NewVReg);
1919    nI.weight /= 10.0F;
1920  }
1921}
1922
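/// alsoFoldARestore - Return true if a foldable restore of vr is recorded at
/// the given instruction index in MBB Id.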
1923bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1924                        BitVector &RestoreMBBs,
1925                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1926  if (!RestoreMBBs[Id])
1927    return false;
1928  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1929  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1930    if (Restores[i].index == index &&
1931        Restores[i].vreg == vr &&
1932        Restores[i].canFold)
1933      return true;
1934  return false;
1935}
1936
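/// eraseRestoreInfo - Mark the restore of vr recorded at the given index in
/// MBB Id as dead (index -1) so that no reload is issued for it.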
1937void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1938                        BitVector &RestoreMBBs,
1939                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1940  if (!RestoreMBBs[Id])
1941    return;
1942  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1943  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1944    if (Restores[i].index == index && Restores[i].vreg == vr)
1945      Restores[i].index = -1;
1946}
1947
1948/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1949/// spilled and create empty intervals for their uses.
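///
/// For example (hypothetical):
///   %reg1024<def> = IMPLICIT_DEF
///   ...           = use %reg1024
/// The IMPLICIT_DEF is deleted, and the use is rewritten to a fresh
/// implicitly-defined (undef) vreg with an empty interval instead of
/// being reloaded.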
1950void
1951LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1952                                    const TargetRegisterClass* rc,
1953                                    std::vector<LiveInterval*> &NewLIs) {
1954  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1955         re = mri_->reg_end(); ri != re; ) {
1956    MachineOperand &O = ri.getOperand();
1957    MachineInstr *MI = &*ri;
1958    ++ri;
1959    if (O.isDef()) {
1960      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1961             "Register def was not rewritten?");
1962      RemoveMachineInstrFromMaps(MI);
1963      vrm.RemoveMachineInstrFromMaps(MI);
1964      MI->eraseFromParent();
1965    } else {
1966      // This must be a use of an implicit_def so it's not part of the live
1967      // interval. Create a new empty live interval for it.
1968      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1969      unsigned NewVReg = mri_->createVirtualRegister(rc);
1970      vrm.grow();
1971      vrm.setIsImplicitlyDefined(NewVReg);
1972      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1973      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1974        MachineOperand &MO = MI->getOperand(i);
1975        if (MO.isReg() && MO.getReg() == li.reg) {
1976          MO.setReg(NewVReg);
1977          MO.setIsUndef();
1978        }
1979      }
1980    }
1981  }
1982}
1983
1984std::vector<LiveInterval*> LiveIntervals::
1985addIntervalsForSpillsFast(const LiveInterval &li,
1986                          const MachineLoopInfo *loopInfo,
1987                          VirtRegMap &vrm) {
1988  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1989
1990  std::vector<LiveInterval*> added;
1991
1992  assert(li.weight != HUGE_VALF &&
1993         "attempt to spill already spilled interval!");
1994
1995  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1996  DEBUG(li.dump());
1997  DOUT << '\n';
1998
1999  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2000
2001  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2002  while (RI != mri_->reg_end()) {
2003    MachineInstr* MI = &*RI;
2004
2005    SmallVector<unsigned, 2> Indices;
2006    bool HasUse = false;
2007    bool HasDef = false;
2008
2009    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2010      MachineOperand& mop = MI->getOperand(i);
2011      if (!mop.isReg() || mop.getReg() != li.reg) continue;
2012
2013      HasUse |= MI->getOperand(i).isUse();
2014      HasDef |= MI->getOperand(i).isDef();
2015
2016      Indices.push_back(i);
2017    }
2018
2019    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2020                              Indices, true, slot, li.reg)) {
2021      unsigned NewVReg = mri_->createVirtualRegister(rc);
2022      vrm.grow();
2023      vrm.assignVirt2StackSlot(NewVReg, slot);
2024
2025      // Create a new live interval for this spill.
2026      LiveInterval &nI = getOrCreateInterval(NewVReg);
2027
2028      // The spill weight is now infinity as it
2029      // cannot be spilled again.
2030      nI.weight = HUGE_VALF;
2031
2032      // Rewrite register operands to use the new vreg.
2033      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2034           E = Indices.end(); I != E; ++I) {
2035        MI->getOperand(*I).setReg(NewVReg);
2036
2037        if (MI->getOperand(*I).isUse())
2038          MI->getOperand(*I).setIsKill(true);
2039      }
2040
2041      // Fill in the new live interval.
2042      unsigned index = getInstructionIndex(MI);
2043      if (HasUse) {
2044        LiveRange LR(getLoadIndex(index), getUseIndex(index),
2045                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2046        DOUT << " +" << LR;
2047        nI.addRange(LR);
2048        vrm.addRestorePoint(NewVReg, MI);
2049      }
2050      if (HasDef) {
2051        LiveRange LR(getDefIndex(index), getStoreIndex(index),
2052                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2053        DOUT << " +" << LR;
2054        nI.addRange(LR);
2055        vrm.addSpillPoint(NewVReg, true, MI);
2056      }
2057
2058      added.push_back(&nI);
2059
2060      DOUT << "\t\t\t\tadded new interval: ";
2061      DEBUG(nI.dump());
2062      DOUT << '\n';
2063    }
2064
2065
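    // MI is no longer a use of li.reg: either the fold erased it, or all of
    // its li.reg operands were rewritten to NewVReg. Restart the scan.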
2066    RI = mri_->reg_begin(li.reg);
2067  }
2068
2069  return added;
2070}
2071
2072std::vector<LiveInterval*> LiveIntervals::
2073addIntervalsForSpills(const LiveInterval &li,
2074                      SmallVectorImpl<LiveInterval*> &SpillIs,
2075                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2076
2077  if (EnableFastSpilling)
2078    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2079
2080  assert(li.weight != HUGE_VALF &&
2081         "attempt to spill already spilled interval!");
2082
2083  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2084  li.print(DOUT, tri_);
2085  DOUT << '\n';
2086
2087  // Each bit specifies whether a spill is required in the corresponding MBB.
2088  BitVector SpillMBBs(mf_->getNumBlockIDs());
2089  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2090  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2091  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2092  DenseMap<unsigned,unsigned> MBBVRegsMap;
2093  std::vector<LiveInterval*> NewLIs;
2094  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2095
2096  unsigned NumValNums = li.getNumValNums();
2097  SmallVector<MachineInstr*, 4> ReMatDefs;
2098  ReMatDefs.resize(NumValNums, NULL);
2099  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2100  ReMatOrigDefs.resize(NumValNums, NULL);
2101  SmallVector<int, 4> ReMatIds;
2102  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2103  BitVector ReMatDelete(NumValNums);
2104  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2105
2106  // Spilling a split live interval. It cannot be split any further. Also,
2107  // it's guaranteed to be a single val# / range interval.
2108  if (vrm.getPreSplitReg(li.reg)) {
2109    vrm.setIsSplitFromReg(li.reg, 0);
2110    // Unset the split kill marker on the last use.
2111    unsigned KillIdx = vrm.getKillPoint(li.reg);
2112    if (KillIdx) {
2113      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2114      assert(KillMI && "Last use disappeared?");
2115      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2116      assert(KillOp != -1 && "Last use disappeared?");
2117      KillMI->getOperand(KillOp).setIsKill(false);
2118    }
2119    vrm.removeKillPoint(li.reg);
2120    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2121    Slot = vrm.getStackSlot(li.reg);
2122    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2123    MachineInstr *ReMatDefMI = DefIsReMat ?
2124      vrm.getReMaterializedMI(li.reg) : NULL;
2125    int LdSlot = 0;
2126    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2127    bool isLoad = isLoadSS ||
2128      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2129    bool IsFirstRange = true;
2130    for (LiveInterval::Ranges::const_iterator
2131           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2132      // If this is a split live interval with multiple ranges, it means there
2133      // are two-address instructions that re-defined the value. Only the
2134      // first def can be rematerialized!
2135      if (IsFirstRange) {
2136        // Note ReMatOrigDefMI has already been deleted.
2137        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2138                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2139                             false, vrm, rc, ReMatIds, loopInfo,
2140                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2141                             MBBVRegsMap, NewLIs);
2142      } else {
2143        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2144                             Slot, 0, false, false, false,
2145                             false, vrm, rc, ReMatIds, loopInfo,
2146                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2147                             MBBVRegsMap, NewLIs);
2148      }
2149      IsFirstRange = false;
2150    }
2151
2152    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2153    return NewLIs;
2154  }
2155
2156  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2157  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2158    TrySplit = false;
2159  if (TrySplit)
2160    ++numSplits;
2161  bool NeedStackSlot = false;
2162  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2163       i != e; ++i) {
2164    const VNInfo *VNI = *i;
2165    unsigned VN = VNI->id;
2166    if (VNI->isUnused())
2167      continue; // Dead val#.
2168    // Is the def for the val# rematerializable?
2169    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2170      ? getInstructionFromIndex(VNI->def) : 0;
2171    bool dummy;
2172    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2173      // Remember how to remat the def of this val#.
2174      ReMatOrigDefs[VN] = ReMatDefMI;
2175      // Original def may be modified so we have to make a copy here.
2176      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2177      ClonedMIs.push_back(Clone);
2178      ReMatDefs[VN] = Clone;
2179
2180      bool CanDelete = true;
2181      if (VNI->hasPHIKill()) {
2182        // A kill is a phi node, so not all of its uses can be rematerialized.
2183        // It must not be deleted.
2184        CanDelete = false;
2185        // Need a stack slot if there is any live range where uses cannot be
2186        // rematerialized.
2187        NeedStackSlot = true;
2188      }
2189      if (CanDelete)
2190        ReMatDelete.set(VN);
2191    } else {
2192      // Need a stack slot if there is any live range where uses cannot be
2193      // rematerialized.
2194      NeedStackSlot = true;
2195    }
2196  }
2197
2198  // One stack slot per live interval.
2199  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2200    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2201      Slot = vrm.assignVirt2StackSlot(li.reg);
2202
2203    // This case only occurs when the prealloc splitter has already assigned
2204    // a stack slot to this vreg.
2205    else
2206      Slot = vrm.getStackSlot(li.reg);
2207  }
2208
2209  // Create new intervals and rewrite defs and uses.
2210  for (LiveInterval::Ranges::const_iterator
2211         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2212    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2213    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2214    bool DefIsReMat = ReMatDefMI != NULL;
2215    bool CanDelete = ReMatDelete[I->valno->id];
2216    int LdSlot = 0;
2217    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2218    bool isLoad = isLoadSS ||
2219      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2220    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2221                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2222                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2223                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2224                               MBBVRegsMap, NewLIs);
2225  }
2226
2227  // Insert spills / restores if we are splitting.
2228  if (!TrySplit) {
2229    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2230    return NewLIs;
2231  }
2232
2233  SmallPtrSet<LiveInterval*, 4> AddedKill;
2234  SmallVector<unsigned, 2> Ops;
2235  if (NeedStackSlot) {
2236    int Id = SpillMBBs.find_first();
2237    while (Id != -1) {
2238      std::vector<SRInfo> &spills = SpillIdxes[Id];
2239      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2240        int index = spills[i].index;
2241        unsigned VReg = spills[i].vreg;
2242        LiveInterval &nI = getOrCreateInterval(VReg);
2243        bool isReMat = vrm.isReMaterialized(VReg);
2244        MachineInstr *MI = getInstructionFromIndex(index);
2245        bool CanFold = false;
2246        bool FoundUse = false;
2247        Ops.clear();
2248        if (spills[i].canFold) {
2249          CanFold = true;
2250          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2251            MachineOperand &MO = MI->getOperand(j);
2252            if (!MO.isReg() || MO.getReg() != VReg)
2253              continue;
2254
2255            Ops.push_back(j);
2256            if (MO.isDef())
2257              continue;
2258            if (isReMat ||
2259                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2260                                                RestoreMBBs, RestoreIdxes))) {
2261              // MI has two-address uses of the same register. If the use
2262              // isn't the first and only use in the BB, then we can't fold
2263              // it. FIXME: Move this to rewriteInstructionsForSpills.
2264              CanFold = false;
2265              break;
2266            }
2267            FoundUse = true;
2268          }
2269        }
2270        // Fold the store into the def if possible.
2271        bool Folded = false;
2272        if (CanFold && !Ops.empty()) {
2273          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2274            Folded = true;
2275            if (FoundUse) {
2276              // Also folded the use(s); do not issue a load.
2277              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2278              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2279            }
2280            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2281          }
2282        }
2283
2284        // Otherwise tell the spiller to issue a spill.
2285        if (!Folded) {
2286          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2287          bool isKill = LR->end == getStoreIndex(index);
2288          if (!MI->registerDefIsDead(nI.reg))
2289            // No need to spill a dead def.
2290            vrm.addSpillPoint(VReg, isKill, MI);
2291          if (isKill)
2292            AddedKill.insert(&nI);
2293        }
2294      }
2295      Id = SpillMBBs.find_next(Id);
2296    }
2297  }
2298
2299  int Id = RestoreMBBs.find_first();
2300  while (Id != -1) {
2301    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2302    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2303      int index = restores[i].index;
2304      if (index == -1)
2305        continue;
2306      unsigned VReg = restores[i].vreg;
2307      LiveInterval &nI = getOrCreateInterval(VReg);
2308      bool isReMat = vrm.isReMaterialized(VReg);
2309      MachineInstr *MI = getInstructionFromIndex(index);
2310      bool CanFold = false;
2311      Ops.clear();
2312      if (restores[i].canFold) {
2313        CanFold = true;
2314        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2315          MachineOperand &MO = MI->getOperand(j);
2316          if (!MO.isReg() || MO.getReg() != VReg)
2317            continue;
2318
2319          if (MO.isDef()) {
2320            // If this restore were to be folded, it would have been folded
2321            // already.
2322            CanFold = false;
2323            break;
2324          }
2325          Ops.push_back(j);
2326        }
2327      }
2328
2329      // Fold the load into the use if possible.
2330      bool Folded = false;
2331      if (CanFold && !Ops.empty()) {
2332        if (!isReMat)
2333          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2334        else {
2335          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2336          int LdSlot = 0;
2337          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2338          // If the rematerializable def is a load, also try to fold it.
2339          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2340            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2341                                          Ops, isLoadSS, LdSlot, VReg);
2342          if (!Folded) {
2343            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2344            if (ImpUse) {
2345              // Re-matting an instruction with virtual register use. Add the
2346              // register as an implicit use on the use MI and update the register
2347              // interval's spill weight to HUGE_VALF to prevent it from being
2348              // spilled.
2349              LiveInterval &ImpLi = getInterval(ImpUse);
2350              ImpLi.weight = HUGE_VALF;
2351              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2352            }
2353          }
2354        }
2355      }
2356      // If folding is not possible / failed, then tell the spiller to issue a
2357      // load / rematerialization for us.
2358      if (Folded)
2359        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2360      else
2361        vrm.addRestorePoint(VReg, MI);
2362    }
2363    Id = RestoreMBBs.find_next(Id);
2364  }
2365
2366  // Finalize intervals: add kills, finalize spill weights, and filter out
2367  // dead intervals.
2368  std::vector<LiveInterval*> RetNewLIs;
2369  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2370    LiveInterval *LI = NewLIs[i];
2371    if (!LI->empty()) {
2372      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2373      if (!AddedKill.count(LI)) {
2374        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2375        unsigned LastUseIdx = getBaseIndex(LR->end);
2376        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2377        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2378        assert(UseIdx != -1);
2379        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2380          LastUse->getOperand(UseIdx).setIsKill();
2381          vrm.addKillPoint(LI->reg, LastUseIdx);
2382        }
2383      }
2384      RetNewLIs.push_back(LI);
2385    }
2386  }
2387
2388  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2389  return RetNewLIs;
2390}
2391
2392/// hasAllocatableSuperReg - Return true if the specified physical register has
2393/// any super register that's allocatable.
2394bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2395  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2396    if (allocatableRegs_[*AS] && hasInterval(*AS))
2397      return true;
2398  return false;
2399}
2400
2401/// getRepresentativeReg - Find the largest super register of the specified
2402/// physical register.
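/// For example, on 32-bit x86, querying AL or AX would typically yield EAX
/// (assuming EAX has a live interval), since EAX has no allocatable
/// super-register of its own.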
2403unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2404  // Find the largest super-register that is allocatable.
2405  unsigned BestReg = Reg;
2406  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2407    unsigned SuperReg = *AS;
2408    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2409      BestReg = SuperReg;
2410      break;
2411    }
2412  }
2413  return BestReg;
2414}
2415
2416/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2417/// specified interval that conflict with the specified physical register.
2418unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2419                                                   unsigned PhysReg) const {
2420  unsigned NumConflicts = 0;
2421  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2422  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2423         E = mri_->reg_end(); I != E; ++I) {
2424    MachineOperand &O = I.getOperand();
2425    MachineInstr *MI = O.getParent();
2426    unsigned Index = getInstructionIndex(MI);
2427    if (pli.liveAt(Index))
2428      ++NumConflicts;
2429  }
2430  return NumConflicts;
2431}
2432
2433/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2434/// around all defs and uses of the specified interval. Return true if it
2435/// was able to cut the physical register's live interval.
2436bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2437                                            unsigned PhysReg, VirtRegMap &vrm) {
2438  unsigned SpillReg = getRepresentativeReg(PhysReg);
2439
2440  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2441    // If there is a register that aliases PhysReg but is not a sub-register
2442    // of the chosen representative super register, assert, since we can't
2443    // handle it yet.
2444    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2445           tri_->isSuperRegister(*AS, SpillReg));
2446
2447  bool Cut = false;
2448  LiveInterval &pli = getInterval(SpillReg);
2449  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2450  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2451         E = mri_->reg_end(); I != E; ++I) {
2452    MachineOperand &O = I.getOperand();
2453    MachineInstr *MI = O.getParent();
2454    if (SeenMIs.count(MI))
2455      continue;
2456    SeenMIs.insert(MI);
2457    unsigned Index = getInstructionIndex(MI);
2458    if (pli.liveAt(Index)) {
2459      vrm.addEmergencySpill(SpillReg, MI);
2460      unsigned StartIdx = getLoadIndex(Index);
2461      unsigned EndIdx = getStoreIndex(Index)+1;
2462      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2463        pli.removeRange(StartIdx, EndIdx);
2464        Cut = true;
2465      } else {
2466        std::string msg;
2467        raw_string_ostream Msg(msg);
2468        Msg << "Ran out of registers during register allocation!";
2469        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2470          Msg << "\nPlease check your inline asm statement for invalid "
2471               << "constraints:\n";
2472          MI->print(Msg, tm_);
2473        }
2474        llvm_report_error(Msg.str());
2475      }
2476      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2477        if (!hasInterval(*AS))
2478          continue;
2479        LiveInterval &spli = getInterval(*AS);
2480        if (spli.liveAt(Index))
2481          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2482      }
2483    }
2484  }
2485  return Cut;
2486}
2487
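/// addLiveRangeToEndOfBlock - Create a new value for reg defined by startInst,
/// extend its live range to the end of startInst's basic block, and return
/// the added range.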
2488LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2489                                                  MachineInstr* startInst) {
2490  LiveInterval& Interval = getOrCreateInterval(reg);
2491  VNInfo* VN = Interval.getNextValue(
2492            getInstructionIndex(startInst) + InstrSlots::DEF,
2493            startInst, true, getVNInfoAllocator());
2494  VN->setHasPHIKill(true);
2495  VN->kills.push_back(
2496    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2497  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2498               getMBBEndIdx(startInst->getParent()) + 1, VN);
2499  Interval.addRange(LR);
2500
2501  return LR;
2502}
2503