LiveIntervalAnalysis.cpp revision 397f4560780d34da0bd1e4c9b9101c6f0774e8ff
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
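///
/// A minimal sketch of the intended effect, with illustrative (not real)
/// register names:
///
///   %reg1024 = IMPLICIT_DEF
///   %reg1025 = MOV %reg1024<kill>   -->  %reg1025 = IMPLICIT_DEF
///   ...      = ADD %reg1025, ...    -->  the use of %reg1025 is marked
///                                        with the <undef> flag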
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register in this instruction
          // are also marked as undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers, since they cannot be live out.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def's we want to keep are those that are live
      // out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

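// A note on the numbering used below (restating InstrSlots from
// LiveIntervalAnalysis.h for convenience): each instruction gets a base
// index that is a multiple of InstrSlots::NUM (4), and the slots within it
// (LOAD=0, USE=1, DEF=2, STORE=3) model sub-instruction points. For example,
// an instruction with base index 8 reads its operands at index 9 (use slot)
// and writes its results at index 10 (def slot). The extra empty slots and
// the per-block terminator gaps leave room for instructions inserted later.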
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}

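/// scaleNumbering - Multiply all instruction and MBB indices by the given
/// factor, widening the gaps between consecutive indices. (Inferred from the
/// code below: this lets a client open up numbering space, e.g. before
/// inserting new instructions; the callers are not in this file.)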
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }

}


/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
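      // Skip copies to or from li.reg itself: presumably they will be
      // coalesced or rewritten, so they are not treated as conflicts.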
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef, except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // Flags are copied below.
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // Copy the old value number's flags.

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that defines
          // it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg);
    });

  // Look for kills. If the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getDefIndex(MIIdx) + 1;
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

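  // The value is live-in, so it has no defining instruction in this
  // function's body: pass 'false' for isDefAccurate, use the block number
  // only as a placeholder def index, and mark it as a PHI-style def.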
  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, in
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
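  // Idx2MBBMap is kept sorted by start index (see computeNumbering), so
  // lower_bound finds the first block that starts at or after Start.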

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
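  // Give physical register intervals an effectively infinite weight so the
  // allocator never tries to spill them; virtual register intervals start
  // at zero (their spill weights are computed elsewhere).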
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
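    // With asserts enabled, keep scanning so the assert above can catch a
    // second register operand; in release builds stop at the first one.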
#ifdef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg, or any rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
1400bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1401                                         VirtRegMap &vrm, MachineInstr *DefMI,
1402                                         unsigned InstrIdx,
1403                                         SmallVector<unsigned, 2> &Ops,
1404                                         bool isSS, int Slot, unsigned Reg) {
1405  // If it is an implicit def instruction, just delete it.
1406  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1407    RemoveMachineInstrFromMaps(MI);
1408    vrm.RemoveMachineInstrFromMaps(MI);
1409    MI->eraseFromParent();
1410    ++numFolds;
1411    return true;
1412  }
1413
1414  // Filter the list of operand indexes that are to be folded. Abort if
1415  // any operand will prevent folding.
1416  unsigned MRInfo = 0;
1417  SmallVector<unsigned, 2> FoldOps;
1418  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1419    return false;
1420
1421  // The only time it's safe to fold into a two-address instruction is when
1422  // it's folding a reload and spill from / into a spill stack slot.
1423  if (DefMI && (MRInfo & VirtRegMap::isMod))
1424    return false;
1425
1426  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1427                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1428  if (fmi) {
1429    // Remember this instruction uses the spill slot.
1430    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1431
1432    // Attempt to fold the memory reference into the instruction. If
1433    // we can do this, we don't need to insert spill code.
1434    MachineBasicBlock &MBB = *MI->getParent();
1435    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1436      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1437    vrm.transferSpillPts(MI, fmi);
1438    vrm.transferRestorePts(MI, fmi);
1439    vrm.transferEmergencySpills(MI, fmi);
1440    mi2iMap_.erase(MI);
1441    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
1442    mi2iMap_[fmi] = InstrIdx;
1443    MI = MBB.insert(MBB.erase(MI), fmi);
1444    ++numFolds;
1445    return true;
1446  }
1447  return false;
1448}
1449
1450/// canFoldMemoryOperand - Returns true if the specified load / store
1451/// folding is possible.
1452bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1453                                         SmallVector<unsigned, 2> &Ops,
1454                                         bool ReMat) const {
1455  // Filter the list of operand indexes that are to be folded. Abort if
1456  // any operand will prevent folding.
1457  unsigned MRInfo = 0;
1458  SmallVector<unsigned, 2> FoldOps;
1459  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1460    return false;
1461
1462  // It's only legal to remat for a use, not a def.
1463  if (ReMat && (MRInfo & VirtRegMap::isMod))
1464    return false;
1465
1466  return tii_->canFoldMemoryOperand(MI, FoldOps);
1467}
1468
1469bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1470  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1471  for (LiveInterval::Ranges::const_iterator
1472         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1473    std::vector<IdxMBBPair>::const_iterator II =
1474      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1475    if (II == Idx2MBBMap.end())
1476      continue;
1477    if (I->end > II->first)  // crossing a MBB.
1478      return false;
1479    MBBs.insert(II->second);
1480    if (MBBs.size() > 1)
1481      return false;
1482  }
1483  return true;
1484}
1485
1486/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
1487/// interval inside instructions re-materialized for MI's operands) with NewVReg.
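/// For example, if the def of one of MI's operands is being re-materialized
/// by an instruction that itself reads li.reg (illustrative):
///   %reg1025<def> = LEA32r %reg1024, 1, %noreg, 0
/// then the use of li.reg (%reg1024) inside that re-materialized instruction
/// must be rewritten to NewVReg as well.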
1488void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1489                                       MachineInstr *MI, unsigned NewVReg,
1490                                       VirtRegMap &vrm) {
1491  // There is an implicit use. That means one of the other operands is
1492  // being remat'ed and the remat'ed instruction has li.reg as a
1493  // use operand. Make sure we rewrite that as well.
1494  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1495    MachineOperand &MO = MI->getOperand(i);
1496    if (!MO.isReg())
1497      continue;
1498    unsigned Reg = MO.getReg();
1499    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1500      continue;
1501    if (!vrm.isReMaterialized(Reg))
1502      continue;
1503    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1504    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1505    if (UseMO)
1506      UseMO->setReg(NewVReg);
1507  }
1508}
1509
1510/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1511/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1512bool LiveIntervals::
1513rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1514                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1515                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1516                 unsigned Slot, int LdSlot,
1517                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1518                 VirtRegMap &vrm,
1519                 const TargetRegisterClass* rc,
1520                 SmallVector<int, 4> &ReMatIds,
1521                 const MachineLoopInfo *loopInfo,
1522                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1523                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1524                 std::vector<LiveInterval*> &NewLIs) {
1525  bool CanFold = false;
1526 RestartInstruction:
1527  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1528    MachineOperand& mop = MI->getOperand(i);
1529    if (!mop.isReg())
1530      continue;
1531    unsigned Reg = mop.getReg();
1532    unsigned RegI = Reg;
1533    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1534      continue;
1535    if (Reg != li.reg)
1536      continue;
1537
1538    bool TryFold = !DefIsReMat;
1539    bool FoldSS = true; // Default behavior unless it's a remat.
1540    int FoldSlot = Slot;
1541    if (DefIsReMat) {
1542      // If this is the rematerializable definition MI itself and
1543      // all of its uses are rematerialized, simply delete it.
1544      if (MI == ReMatOrigDefMI && CanDelete) {
1545        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1546                     << *MI << '\n');
1547        RemoveMachineInstrFromMaps(MI);
1548        vrm.RemoveMachineInstrFromMaps(MI);
1549        MI->eraseFromParent();
1550        break;
1551      }
1552
1553      // If def for this use can't be rematerialized, then try folding.
1554      // If def is rematerializable and it's a load, also try folding.
1555      TryFold = !ReMatDefMI || MI == ReMatOrigDefMI || isLoad;
1556      if (isLoad) {
1557        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1558        FoldSS = isLoadSS;
1559        FoldSlot = LdSlot;
1560      }
1561    }
1562
1563    // Scan all of the operands of this instruction rewriting operands
1564    // to use NewVReg instead of li.reg as appropriate.  We do this for
1565    // two reasons:
1566    //
1567    //   1. If the instr reads the same spilled vreg multiple times, we
1568    //      want to reuse the NewVReg.
1569    //   2. If the instr is a two-addr instruction, we are required to
1570    //      keep the src/dst regs pinned.
1571    //
1572    // Keep track of whether we replace a use and/or def so that we can
1573    // create the spill interval with the appropriate range.
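    //
    // For example, in a two-address instruction like (illustrative):
    //   %reg1024<def> = ADD32rr %reg1024<kill>, %reg1024
    // the tied def / use pair and the second read must all be rewritten to
    // the same NewVReg.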
1574
1575    HasUse = mop.isUse();
1576    HasDef = mop.isDef();
1577    SmallVector<unsigned, 2> Ops;
1578    Ops.push_back(i);
1579    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1580      const MachineOperand &MOj = MI->getOperand(j);
1581      if (!MOj.isReg())
1582        continue;
1583      unsigned RegJ = MOj.getReg();
1584      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1585        continue;
1586      if (RegJ == RegI) {
1587        Ops.push_back(j);
1588        if (!MOj.isUndef()) {
1589          HasUse |= MOj.isUse();
1590          HasDef |= MOj.isDef();
1591        }
1592      }
1593    }
1594
1595    // Create a new virtual register for the spill interval.
1596    // Create the new register now so we can map the folded instruction
1597    // to the new register, so that when it is unfolded we get the
1598    // correct answer.
1599    bool CreatedNewVReg = false;
1600    if (NewVReg == 0) {
1601      NewVReg = mri_->createVirtualRegister(rc);
1602      vrm.grow();
1603      CreatedNewVReg = true;
1604    }
1605
1606    if (!TryFold)
1607      CanFold = false;
1608    else {
1609      // Do not fold load / store here if we are splitting. We'll find an
1610      // optimal point to insert a load / store later.
1611      if (!TrySplit) {
1612        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1613                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1614          // Folding the load/store can completely change the instruction in
1615          // unpredictable ways, rescan it from the beginning.
1616
1617          if (FoldSS) {
1618            // We need to give the new vreg the same stack slot as the
1619            // spilled interval.
1620            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1621          }
1622
1623          HasUse = false;
1624          HasDef = false;
1625          CanFold = false;
1626          if (isNotInMIMap(MI))
1627            break;
1628          goto RestartInstruction;
1629        }
1630      } else {
1631        // We'll try to fold it later if it's profitable.
1632        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1633      }
1634    }
1635
1636    mop.setReg(NewVReg);
1637    if (mop.isImplicit())
1638      rewriteImplicitOps(li, MI, NewVReg, vrm);
1639
1640    // Reuse NewVReg for other reads.
1641    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1642      MachineOperand &mopj = MI->getOperand(Ops[j]);
1643      mopj.setReg(NewVReg);
1644      if (mopj.isImplicit())
1645        rewriteImplicitOps(li, MI, NewVReg, vrm);
1646    }
1647
1648    if (CreatedNewVReg) {
1649      if (DefIsReMat) {
1650        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1651        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1652          // Each valnum may have its own remat id.
1653          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1654        } else {
1655          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1656        }
1657        if (!CanDelete || (HasUse && HasDef)) {
1658          // If this is a two-addr instruction then its use operands are
1659          // rematerializable but its def is not. It should be assigned a
1660          // stack slot.
1661          vrm.assignVirt2StackSlot(NewVReg, Slot);
1662        }
1663      } else {
1664        vrm.assignVirt2StackSlot(NewVReg, Slot);
1665      }
1666    } else if (HasUse && HasDef &&
1667               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1668      // If this interval hasn't been assigned a stack slot (because earlier
1669      // def is a deleted remat def), do it now.
1670      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1671      vrm.assignVirt2StackSlot(NewVReg, Slot);
1672    }
1673
1674    // Re-matting an instruction with virtual register use. Add the
1675    // register as an implicit use on the use MI.
1676    if (DefIsReMat && ImpUse)
1677      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1678
1679    // Create a new register interval for this spill / remat.
1680    LiveInterval &nI = getOrCreateInterval(NewVReg);
1681    if (CreatedNewVReg) {
1682      NewLIs.push_back(&nI);
1683      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1684      if (TrySplit)
1685        vrm.setIsSplitFromReg(NewVReg, li.reg);
1686    }
1687
1688    if (HasUse) {
1689      if (CreatedNewVReg) {
1690        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1691                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1692        DEBUG(errs() << " +" << LR);
1693        nI.addRange(LR);
1694      } else {
1695        // Extend the split live interval to this def / use.
1696        unsigned End = getUseIndex(index)+1;
1697        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1698                     nI.getValNumInfo(nI.getNumValNums()-1));
1699        DEBUG(errs() << " +" << LR);
1700        nI.addRange(LR);
1701      }
1702    }
1703    if (HasDef) {
1704      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1705                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1706      DEBUG(errs() << " +" << LR);
1707      nI.addRange(LR);
1708    }
1709
1710    DEBUG({
1711        errs() << "\t\t\t\tAdded new interval: ";
1712        nI.print(errs(), tri_);
1713        errs() << '\n';
1714      });
1715  }
1716  return CanFold;
1717}

1718bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1719                                   const VNInfo *VNI,
1720                                   MachineBasicBlock *MBB, unsigned Idx) const {
1721  unsigned End = getMBBEndIdx(MBB);
1722  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1723    if (VNI->kills[j].isPHIKill)
1724      continue;
1725
1726    unsigned KillIdx = VNI->kills[j].killIdx;
1727    if (KillIdx > Idx && KillIdx < End)
1728      return true;
1729  }
1730  return false;
1731}
1732
1733/// RewriteInfo - Keep track of machine instrs that will be rewritten
1734/// during spilling.
1735namespace {
1736  struct RewriteInfo {
1737    unsigned Index;
1738    MachineInstr *MI;
1739    bool HasUse;
1740    bool HasDef;
1741    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1742      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1743  };
1744
1745  struct RewriteInfoCompare {
1746    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1747      return LHS.Index < RHS.Index;
1748    }
1749  };
1750}
1751
1752void LiveIntervals::
1753rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1754                    LiveInterval::Ranges::const_iterator &I,
1755                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1756                    unsigned Slot, int LdSlot,
1757                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1758                    VirtRegMap &vrm,
1759                    const TargetRegisterClass* rc,
1760                    SmallVector<int, 4> &ReMatIds,
1761                    const MachineLoopInfo *loopInfo,
1762                    BitVector &SpillMBBs,
1763                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1764                    BitVector &RestoreMBBs,
1765                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1766                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1767                    std::vector<LiveInterval*> &NewLIs) {
1768  bool AllCanFold = true;
1769  unsigned NewVReg = 0;
1770  unsigned start = getBaseIndex(I->start);
1771  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1772
1773  // First collect all the def / use in this live range that will be rewritten.
1774  // Make sure they are sorted according to instruction index.
1775  std::vector<RewriteInfo> RewriteMIs;
1776  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1777         re = mri_->reg_end(); ri != re; ) {
1778    MachineInstr *MI = &*ri;
1779    MachineOperand &O = ri.getOperand();
1780    ++ri;
1781    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1782    unsigned index = getInstructionIndex(MI);
1783    if (index < start || index >= end)
1784      continue;
1785
1786    if (O.isUndef())
1787      // Must be defined by an implicit def. It should not be spilled. Note,
1788      // this is for correctness reasons. e.g.
1789      // 8   %reg1024<def> = IMPLICIT_DEF
1790      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1791      // The live range [12, 14) is not part of the r1024 live interval since
1792      // it's defined by an implicit def, so it will not conflict with the
1793      // live interval of r1025. Now suppose both registers are spilled: it's
1794      // easy to end up with both registers reloaded before the INSERT_SUBREG,
1795      // with the two target registers overlapping.
1796      continue;
1797    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1798  }
1799  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1800
1801  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1802  // Now rewrite the defs and uses.
1803  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1804    RewriteInfo &rwi = RewriteMIs[i];
1805    ++i;
1806    unsigned index = rwi.Index;
1807    bool MIHasUse = rwi.HasUse;
1808    bool MIHasDef = rwi.HasDef;
1809    MachineInstr *MI = rwi.MI;
1810    // If MI defs and/or uses the same register multiple times, then there
1811    // are multiple entries.
1812    unsigned NumUses = MIHasUse;
1813    while (i != e && RewriteMIs[i].MI == MI) {
1814      assert(RewriteMIs[i].Index == index);
1815      bool isUse = RewriteMIs[i].HasUse;
1816      if (isUse) ++NumUses;
1817      MIHasUse |= isUse;
1818      MIHasDef |= RewriteMIs[i].HasDef;
1819      ++i;
1820    }
1821    MachineBasicBlock *MBB = MI->getParent();
1822
1823    if (ImpUse && MI != ReMatDefMI) {
1824      // Re-matting an instruction with virtual register use. Update the
1825      // register interval's spill weight to HUGE_VALF to prevent it from
1826      // being spilled.
1827      LiveInterval &ImpLi = getInterval(ImpUse);
1828      ImpLi.weight = HUGE_VALF;
1829    }
1830
1831    unsigned MBBId = MBB->getNumber();
1832    unsigned ThisVReg = 0;
1833    if (TrySplit) {
1834      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1835      if (NVI != MBBVRegsMap.end()) {
1836        ThisVReg = NVI->second;
1837        // One common case:
1838        // x = use
1839        // ...
1840        // ...
1841        // def = ...
1842        //     = use
1843        // It's better to start a new interval to avoid artificially
1844        // extending the new interval.
1845        if (MIHasDef && !MIHasUse) {
1846          MBBVRegsMap.erase(MBB->getNumber());
1847          ThisVReg = 0;
1848        }
1849      }
1850    }
1851
1852    bool IsNew = ThisVReg == 0;
1853    if (IsNew) {
1854      // This ends the previous live interval. If all of its def / use
1855      // can be folded, give it a low spill weight.
1856      if (NewVReg && TrySplit && AllCanFold) {
1857        LiveInterval &nI = getOrCreateInterval(NewVReg);
1858        nI.weight /= 10.0F;
1859      }
1860      AllCanFold = true;
1861    }
1862    NewVReg = ThisVReg;
1863
1864    bool HasDef = false;
1865    bool HasUse = false;
1866    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1867                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1868                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1869                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1870                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1871    if (!HasDef && !HasUse)
1872      continue;
1873
1874    AllCanFold &= CanFold;
1875
1876    // Update weight of spill interval.
1877    LiveInterval &nI = getOrCreateInterval(NewVReg);
1878    if (!TrySplit) {
1879      // The spill weight is now infinity as it cannot be spilled again.
1880      nI.weight = HUGE_VALF;
1881      continue;
1882    }
1883
1884    // Keep track of the last def and first use in each MBB.
1885    if (HasDef) {
1886      if (MI != ReMatOrigDefMI || !CanDelete) {
1887        bool HasKill = false;
1888        if (!HasUse)
1889          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1890        else {
1891          // If this is two-address code, then this index starts a new VNInfo.
1892          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1893          if (VNI)
1894            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1895        }
1896        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1897          SpillIdxes.find(MBBId);
1898        if (!HasKill) {
1899          if (SII == SpillIdxes.end()) {
1900            std::vector<SRInfo> S;
1901            S.push_back(SRInfo(index, NewVReg, true));
1902            SpillIdxes.insert(std::make_pair(MBBId, S));
1903          } else if (SII->second.back().vreg != NewVReg) {
1904            SII->second.push_back(SRInfo(index, NewVReg, true));
1905          } else if ((int)index > SII->second.back().index) {
1906            // If there is an earlier def and this is a two-address
1907            // instruction, then it's not possible to fold the store (which
1908            // would also fold the load).
1909            SRInfo &Info = SII->second.back();
1910            Info.index = index;
1911            Info.canFold = !HasUse;
1912          }
1913          SpillMBBs.set(MBBId);
1914        } else if (SII != SpillIdxes.end() &&
1915                   SII->second.back().vreg == NewVReg &&
1916                   (int)index > SII->second.back().index) {
1917          // There is an earlier def that's not killed (must be two-address).
1918          // The spill is no longer needed.
1919          SII->second.pop_back();
1920          if (SII->second.empty()) {
1921            SpillIdxes.erase(MBBId);
1922            SpillMBBs.reset(MBBId);
1923          }
1924        }
1925      }
1926    }
1927
1928    if (HasUse) {
1929      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1930        SpillIdxes.find(MBBId);
1931      if (SII != SpillIdxes.end() &&
1932          SII->second.back().vreg == NewVReg &&
1933          (int)index > SII->second.back().index)
1934        // Use(s) follow the last def; it's not safe to fold the spill.
1935        SII->second.back().canFold = false;
1936      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1937        RestoreIdxes.find(MBBId);
1938      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1939        // If we are splitting live intervals, only fold if it's the first
1940        // use and there isn't another use later in the MBB.
1941        RII->second.back().canFold = false;
1942      else if (IsNew) {
1943        // Only need a reload if there isn't an earlier def / use.
1944        if (RII == RestoreIdxes.end()) {
1945          std::vector<SRInfo> Infos;
1946          Infos.push_back(SRInfo(index, NewVReg, true));
1947          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1948        } else {
1949          RII->second.push_back(SRInfo(index, NewVReg, true));
1950        }
1951        RestoreMBBs.set(MBBId);
1952      }
1953    }
1954
1955    // Update spill weight.
1956    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1957    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1958  }
1959
1960  if (NewVReg && TrySplit && AllCanFold) {
1961    // If all of its def / use can be folded, give it a low spill weight.
1962    LiveInterval &nI = getOrCreateInterval(NewVReg);
1963    nI.weight /= 10.0F;
1964  }
1965}
1966
1967bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1968                        BitVector &RestoreMBBs,
1969                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1970  if (!RestoreMBBs[Id])
1971    return false;
1972  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1973  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1974    if (Restores[i].index == index &&
1975        Restores[i].vreg == vr &&
1976        Restores[i].canFold)
1977      return true;
1978  return false;
1979}
1980
1981void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1982                        BitVector &RestoreMBBs,
1983                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1984  if (!RestoreMBBs[Id])
1985    return;
1986  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1987  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1988    if (Restores[i].index == index && Restores[i].vreg == vr)
1989      Restores[i].index = -1;
1990}
1991
1992/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1993/// spilled and create empty intervals for their uses.
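/// For example (illustrative):
///   %reg1024<def> = IMPLICIT_DEF
///   ...
///             ... = SOMEOP %reg1024
/// The IMPLICIT_DEF is simply erased, and the remaining use is rewritten to
/// a fresh implicitly-defined vreg with an empty interval, so no reload is
/// ever generated for it.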
1994void
1995LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1996                                    const TargetRegisterClass* rc,
1997                                    std::vector<LiveInterval*> &NewLIs) {
1998  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1999         re = mri_->reg_end(); ri != re; ) {
2000    MachineOperand &O = ri.getOperand();
2001    MachineInstr *MI = &*ri;
2002    ++ri;
2003    if (O.isDef()) {
2004      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2005             "Register def was not rewritten?");
2006      RemoveMachineInstrFromMaps(MI);
2007      vrm.RemoveMachineInstrFromMaps(MI);
2008      MI->eraseFromParent();
2009    } else {
2010      // This must be a use of an implicit_def so it's not part of the live
2011      // interval. Create a new empty live interval for it.
2012      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2013      unsigned NewVReg = mri_->createVirtualRegister(rc);
2014      vrm.grow();
2015      vrm.setIsImplicitlyDefined(NewVReg);
2016      NewLIs.push_back(&getOrCreateInterval(NewVReg));
2017      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2018        MachineOperand &MO = MI->getOperand(i);
2019        if (MO.isReg() && MO.getReg() == li.reg) {
2020          MO.setReg(NewVReg);
2021          MO.setIsUndef();
2022        }
2023      }
2024    }
2025  }
2026}
2027
2028std::vector<LiveInterval*> LiveIntervals::
2029addIntervalsForSpillsFast(const LiveInterval &li,
2030                          const MachineLoopInfo *loopInfo,
2031                          VirtRegMap &vrm) {
2032  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2033
2034  std::vector<LiveInterval*> added;
2035
2036  assert(li.weight != HUGE_VALF &&
2037         "attempt to spill already spilled interval!");
2038
2039  DEBUG({
2040      errs() << "\t\t\t\tadding intervals for spills for interval: ";
2041      li.dump();
2042      errs() << '\n';
2043    });
2044
2045  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2046
2047  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2048  while (RI != mri_->reg_end()) {
2049    MachineInstr* MI = &*RI;
2050
2051    SmallVector<unsigned, 2> Indices;
2052    bool HasUse = false;
2053    bool HasDef = false;
2054
2055    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2056      MachineOperand& mop = MI->getOperand(i);
2057      if (!mop.isReg() || mop.getReg() != li.reg) continue;
2058
2059      HasUse |= mop.isUse();
2060      HasDef |= mop.isDef();
2061
2062      Indices.push_back(i);
2063    }
2064
2065    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2066                              Indices, true, slot, li.reg)) {
2067      unsigned NewVReg = mri_->createVirtualRegister(rc);
2068      vrm.grow();
2069      vrm.assignVirt2StackSlot(NewVReg, slot);
2070
2071      // create a new register for this spill
2072      LiveInterval &nI = getOrCreateInterval(NewVReg);
2073
2074      // the spill weight is now infinity as it
2075      // cannot be spilled again
2076      nI.weight = HUGE_VALF;
2077
2078      // Rewrite register operands to use the new vreg.
2079      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2080           E = Indices.end(); I != E; ++I) {
2081        MI->getOperand(*I).setReg(NewVReg);
2082
2083        if (MI->getOperand(*I).isUse())
2084          MI->getOperand(*I).setIsKill(true);
2085      }
2086
2087      // Fill in the new live interval.
2088      unsigned index = getInstructionIndex(MI);
2089      if (HasUse) {
2090        LiveRange LR(getLoadIndex(index), getUseIndex(index),
2091                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2092        DEBUG(errs() << " +" << LR);
2093        nI.addRange(LR);
2094        vrm.addRestorePoint(NewVReg, MI);
2095      }
2096      if (HasDef) {
2097        LiveRange LR(getDefIndex(index), getStoreIndex(index),
2098                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2099        DEBUG(errs() << " +" << LR);
2100        nI.addRange(LR);
2101        vrm.addSpillPoint(NewVReg, true, MI);
2102      }
2103
2104      added.push_back(&nI);
2105
2106      DEBUG({
2107          errs() << "\t\t\t\tadded new interval: ";
2108          nI.dump();
2109          errs() << '\n';
2110        });
2111    }
2112
2114    RI = mri_->reg_begin(li.reg);
2115  }
2116
2117  return added;
2118}
2119
2120std::vector<LiveInterval*> LiveIntervals::
2121addIntervalsForSpills(const LiveInterval &li,
2122                      SmallVectorImpl<LiveInterval*> &SpillIs,
2123                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2124
2125  if (EnableFastSpilling)
2126    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2127
2128  assert(li.weight != HUGE_VALF &&
2129         "attempt to spill already spilled interval!");
2130
2131  DEBUG({
2132      errs() << "\t\t\t\tadding intervals for spills for interval: ";
2133      li.print(errs(), tri_);
2134      errs() << '\n';
2135    });
2136
2137  // Each bit specifies whether a spill is required in the corresponding MBB.
2138  BitVector SpillMBBs(mf_->getNumBlockIDs());
2139  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2140  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2141  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2142  DenseMap<unsigned,unsigned> MBBVRegsMap;
2143  std::vector<LiveInterval*> NewLIs;
2144  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2145
2146  unsigned NumValNums = li.getNumValNums();
2147  SmallVector<MachineInstr*, 4> ReMatDefs;
2148  ReMatDefs.resize(NumValNums, NULL);
2149  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2150  ReMatOrigDefs.resize(NumValNums, NULL);
2151  SmallVector<int, 4> ReMatIds;
2152  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2153  BitVector ReMatDelete(NumValNums);
2154  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2155
2156  // Spilling a split live interval. It cannot be split any further, and it
2157  // is guaranteed to be a single val# / range interval.
2158  if (vrm.getPreSplitReg(li.reg)) {
2159    vrm.setIsSplitFromReg(li.reg, 0);
2160    // Unset the split kill marker on the last use.
2161    unsigned KillIdx = vrm.getKillPoint(li.reg);
2162    if (KillIdx) {
2163      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2164      assert(KillMI && "Last use disappeared?");
2165      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2166      assert(KillOp != -1 && "Last use disappeared?");
2167      KillMI->getOperand(KillOp).setIsKill(false);
2168    }
2169    vrm.removeKillPoint(li.reg);
2170    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2171    Slot = vrm.getStackSlot(li.reg);
2172    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2173    MachineInstr *ReMatDefMI = DefIsReMat ?
2174      vrm.getReMaterializedMI(li.reg) : NULL;
2175    int LdSlot = 0;
2176    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2177    bool isLoad = isLoadSS ||
2178      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2179    bool IsFirstRange = true;
2180    for (LiveInterval::Ranges::const_iterator
2181           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2182      // If this is a split live interval with multiple ranges, it means there
2183      // are two-address instructions that re-defined the value. Only the
2184      // first def can be rematerialized!
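      // For example (illustrative):
      //   %reg1024<def> = MOV32ri 4
      //   %reg1024<def> = ADD32ri %reg1024<kill>, 1
      // Only the MOV32ri is a candidate; the two-address redef depends on the
      // previous value and cannot be rematerialized.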
2185      if (IsFirstRange) {
2186        // Note ReMatOrigDefMI has already been deleted.
2187        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2188                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2189                             false, vrm, rc, ReMatIds, loopInfo,
2190                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2191                             MBBVRegsMap, NewLIs);
2192      } else {
2193        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2194                             Slot, 0, false, false, false,
2195                             false, vrm, rc, ReMatIds, loopInfo,
2196                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2197                             MBBVRegsMap, NewLIs);
2198      }
2199      IsFirstRange = false;
2200    }
2201
2202    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2203    return NewLIs;
2204  }
2205
2206  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2207  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2208    TrySplit = false;
2209  if (TrySplit)
2210    ++numSplits;
2211  bool NeedStackSlot = false;
2212  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2213       i != e; ++i) {
2214    const VNInfo *VNI = *i;
2215    unsigned VN = VNI->id;
2216    if (VNI->isUnused())
2217      continue; // Dead val#.
2218    // Is the def for the val# rematerializable?
2219    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2220      ? getInstructionFromIndex(VNI->def) : 0;
2221    bool dummy;
2222    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2223      // Remember how to remat the def of this val#.
2224      ReMatOrigDefs[VN] = ReMatDefMI;
2225      // Original def may be modified so we have to make a copy here.
2226      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2227      ClonedMIs.push_back(Clone);
2228      ReMatDefs[VN] = Clone;
2229
2230      bool CanDelete = true;
2231      if (VNI->hasPHIKill()) {
2232        // A kill is a phi node, so not all of its uses can be
2233        // rematerialized. It must not be deleted.
2234        CanDelete = false;
2235        // Need a stack slot if there is any live range where uses cannot be
2236        // rematerialized.
2237        NeedStackSlot = true;
2238      }
2239      if (CanDelete)
2240        ReMatDelete.set(VN);
2241    } else {
2242      // Need a stack slot if there is any live range where uses cannot be
2243      // rematerialized.
2244      NeedStackSlot = true;
2245    }
2246  }
2247
2248  // One stack slot per live interval.
2249  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2250    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2251      Slot = vrm.assignVirt2StackSlot(li.reg);
2252
2253    // This case only occurs when the prealloc splitter has already assigned
2254    // a stack slot to this vreg.
2255    else
2256      Slot = vrm.getStackSlot(li.reg);
2257  }
2258
2259  // Create new intervals and rewrite defs and uses.
2260  for (LiveInterval::Ranges::const_iterator
2261         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2262    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2263    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2264    bool DefIsReMat = ReMatDefMI != NULL;
2265    bool CanDelete = ReMatDelete[I->valno->id];
2266    int LdSlot = 0;
2267    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2268    bool isLoad = isLoadSS ||
2269      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2270    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2271                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2272                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2273                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2274                               MBBVRegsMap, NewLIs);
2275  }
2276
2277  // Insert spills / restores only if we are splitting; otherwise we are done.
2278  if (!TrySplit) {
2279    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2280    return NewLIs;
2281  }
2282
2283  SmallPtrSet<LiveInterval*, 4> AddedKill;
2284  SmallVector<unsigned, 2> Ops;
2285  if (NeedStackSlot) {
2286    int Id = SpillMBBs.find_first();
2287    while (Id != -1) {
2288      std::vector<SRInfo> &spills = SpillIdxes[Id];
2289      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2290        int index = spills[i].index;
2291        unsigned VReg = spills[i].vreg;
2292        LiveInterval &nI = getOrCreateInterval(VReg);
2293        bool isReMat = vrm.isReMaterialized(VReg);
2294        MachineInstr *MI = getInstructionFromIndex(index);
2295        bool CanFold = false;
2296        bool FoundUse = false;
2297        Ops.clear();
2298        if (spills[i].canFold) {
2299          CanFold = true;
2300          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2301            MachineOperand &MO = MI->getOperand(j);
2302            if (!MO.isReg() || MO.getReg() != VReg)
2303              continue;
2304
2305            Ops.push_back(j);
2306            if (MO.isDef())
2307              continue;
2308            if (isReMat ||
2309                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2310                                                RestoreMBBs, RestoreIdxes))) {
2311              // MI has two-address uses of the same register. If the use
2312              // isn't the first and only use in the BB, then we can't fold
2313              // it. FIXME: Move this to rewriteInstructionsForSpills.
2314              CanFold = false;
2315              break;
2316            }
2317            FoundUse = true;
2318          }
2319        }
2320        // Fold the store into the def if possible.
2321        bool Folded = false;
2322        if (CanFold && !Ops.empty()) {
2323          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2324            Folded = true;
2325            if (FoundUse) {
2326              // Also folded uses, do not issue a load.
2327              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2328              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2329            }
2330            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2331          }
2332        }
2333
2334        // Otherwise tell the spiller to issue a spill.
2335        if (!Folded) {
2336          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2337          bool isKill = LR->end == getStoreIndex(index);
2338          if (!MI->registerDefIsDead(nI.reg))
2339            // No need to spill a dead def.
2340            vrm.addSpillPoint(VReg, isKill, MI);
2341          if (isKill)
2342            AddedKill.insert(&nI);
2343        }
2344      }
2345      Id = SpillMBBs.find_next(Id);
2346    }
2347  }
2348
2349  int Id = RestoreMBBs.find_first();
2350  while (Id != -1) {
2351    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2352    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2353      int index = restores[i].index;
2354      if (index == -1)
2355        continue;
2356      unsigned VReg = restores[i].vreg;
2357      LiveInterval &nI = getOrCreateInterval(VReg);
2358      bool isReMat = vrm.isReMaterialized(VReg);
2359      MachineInstr *MI = getInstructionFromIndex(index);
2360      bool CanFold = false;
2361      Ops.clear();
2362      if (restores[i].canFold) {
2363        CanFold = true;
2364        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2365          MachineOperand &MO = MI->getOperand(j);
2366          if (!MO.isReg() || MO.getReg() != VReg)
2367            continue;
2368
2369          if (MO.isDef()) {
2370            // If this restore were to be folded, it would have been folded
2371            // already.
2372            CanFold = false;
2373            break;
2374          }
2375          Ops.push_back(j);
2376        }
2377      }
2378
2379      // Fold the load into the use if possible.
2380      bool Folded = false;
2381      if (CanFold && !Ops.empty()) {
2382        if (!isReMat)
2383          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2384        else {
2385          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2386          int LdSlot = 0;
2387          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2388          // If the rematerializable def is a load, also try to fold it.
2389          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2390            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2391                                          Ops, isLoadSS, LdSlot, VReg);
2392          if (!Folded) {
2393            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2394            if (ImpUse) {
2395              // Re-matting an instruction with virtual register use. Add the
2396              // register as an implicit use on the use MI and update the register
2397              // interval's spill weight to HUGE_VALF to prevent it from being
2398              // spilled.
2399              LiveInterval &ImpLi = getInterval(ImpUse);
2400              ImpLi.weight = HUGE_VALF;
2401              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2402            }
2403          }
2404        }
2405      }
2406      // If folding is not possible / failed, then tell the spiller to issue a
2407      // load / rematerialization for us.
2408      if (Folded)
2409        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2410      else
2411        vrm.addRestorePoint(VReg, MI);
2412    }
2413    Id = RestoreMBBs.find_next(Id);
2414  }
2415
2416  // Finalize intervals: add kills, finalize spill weights, and filter out
2417  // dead intervals.
2418  std::vector<LiveInterval*> RetNewLIs;
2419  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2420    LiveInterval *LI = NewLIs[i];
2421    if (!LI->empty()) {
2422      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2423      if (!AddedKill.count(LI)) {
2424        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2425        unsigned LastUseIdx = getBaseIndex(LR->end);
2426        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2427        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2428        assert(UseIdx != -1);
2429        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2430          LastUse->getOperand(UseIdx).setIsKill();
2431          vrm.addKillPoint(LI->reg, LastUseIdx);
2432        }
2433      }
2434      RetNewLIs.push_back(LI);
2435    }
2436  }
2437
2438  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2439  return RetNewLIs;
2440}
2441
2442/// hasAllocatableSuperReg - Return true if the specified physical register has
2443/// any super register that's allocatable.
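/// For example, on x86 the 16-bit register AX has super-registers EAX and
/// (in 64-bit mode) RAX; if such a super-register is allocatable and has a
/// live interval, this returns true.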
2444bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2445  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2446    if (allocatableRegs_[*AS] && hasInterval(*AS))
2447      return true;
2448  return false;
2449}
2450
2451/// getRepresentativeReg - Find the largest super register of the specified
2452/// physical register.
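/// For example, given AX on x86 this would walk its super-registers (EAX,
/// RAX in 64-bit mode) and pick the one that has a live interval but no
/// allocatable super-register of its own.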
2453unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2454  // Find the largest super-register that is allocatable.
2455  unsigned BestReg = Reg;
2456  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2457    unsigned SuperReg = *AS;
2458    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2459      BestReg = SuperReg;
2460      break;
2461    }
2462  }
2463  return BestReg;
2464}
2465
2466/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2467/// specified interval that conflict with the specified physical register.
2468unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2469                                                   unsigned PhysReg) const {
2470  unsigned NumConflicts = 0;
2471  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2472  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2473         E = mri_->reg_end(); I != E; ++I) {
2474    MachineOperand &O = I.getOperand();
2475    MachineInstr *MI = O.getParent();
2476    unsigned Index = getInstructionIndex(MI);
2477    if (pli.liveAt(Index))
2478      ++NumConflicts;
2479  }
2480  return NumConflicts;
2481}
2482
2483/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2484/// around all defs and uses of the specified interval. Return true if it
2485/// was able to cut its interval.
2486bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2487                                            unsigned PhysReg, VirtRegMap &vrm) {
2488  unsigned SpillReg = getRepresentativeReg(PhysReg);
2489
2490  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2491    // If there are registers which alias PhysReg but are not sub-registers
2492    // of the chosen representative super register, assert since we can't
2493    // handle them yet.
2494    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2495           tri_->isSuperRegister(*AS, SpillReg));
2496
2497  bool Cut = false;
2498  LiveInterval &pli = getInterval(SpillReg);
2499  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2500  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2501         E = mri_->reg_end(); I != E; ++I) {
2502    MachineOperand &O = I.getOperand();
2503    MachineInstr *MI = O.getParent();
2504    if (SeenMIs.count(MI))
2505      continue;
2506    SeenMIs.insert(MI);
2507    unsigned Index = getInstructionIndex(MI);
2508    if (pli.liveAt(Index)) {
2509      vrm.addEmergencySpill(SpillReg, MI);
2510      unsigned StartIdx = getLoadIndex(Index);
2511      unsigned EndIdx = getStoreIndex(Index)+1;
2512      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2513        pli.removeRange(StartIdx, EndIdx);
2514        Cut = true;
2515      } else {
2516        std::string msg;
2517        raw_string_ostream Msg(msg);
2518        Msg << "Ran out of registers during register allocation!";
2519        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2520          Msg << "\nPlease check your inline asm statement for invalid "
2521               << "constraints:\n";
2522          MI->print(Msg, tm_);
2523        }
2524        llvm_report_error(Msg.str());
2525      }
2526      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2527        if (!hasInterval(*AS))
2528          continue;
2529        LiveInterval &spli = getInterval(*AS);
2530        if (spli.liveAt(Index))
2531          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2532      }
2533    }
2534  }
2535  return Cut;
2536}
2537
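/// addLiveRangeToEndOfBlock - Create a live range for reg that extends from
/// startInst's def slot to the end of its basic block, marking the value as
/// defined by startInst and killed by a PHI at the block boundary.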
2538LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2539                                                  MachineInstr* startInst) {
2540  LiveInterval& Interval = getOrCreateInterval(reg);
2541  VNInfo* VN = Interval.getNextValue(
2542            getInstructionIndex(startInst) + InstrSlots::DEF,
2543            startInst, true, getVNInfoAllocator());
2544  VN->setHasPHIKill(true);
2545  VN->kills.push_back(
2546    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2547  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2548               getMBBEndIdx(startInst->getParent()) + 1, VN);
2549  Interval.addRange(LR);
2550
2551  return LR;
2552}
2553
2554