LiveIntervalAnalysis.cpp revision 180b5e789d07f611994743f4e936b44a7947c7fb
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineRegisterInfo.h"
29#include "llvm/CodeGen/Passes.h"
30#include "llvm/CodeGen/PseudoSourceValue.h"
31#include "llvm/Target/TargetRegisterInfo.h"
32#include "llvm/Target/TargetInstrInfo.h"
33#include "llvm/Target/TargetMachine.h"
34#include "llvm/Target/TargetOptions.h"
35#include "llvm/Support/CommandLine.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Support/ErrorHandling.h"
38#include "llvm/Support/raw_ostream.h"
39#include "llvm/ADT/DepthFirstIterator.h"
40#include "llvm/ADT/SmallSet.h"
41#include "llvm/ADT/Statistic.h"
42#include "llvm/ADT/STLExtras.h"
43#include <algorithm>
44#include <limits>
45#include <cmath>
46using namespace llvm;
47
48// Hidden options for help debugging.
49static cl::opt<bool> DisableReMat("disable-rematerialization",
50                                  cl::init(false), cl::Hidden);
51
52static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
53                               cl::init(true), cl::Hidden);
54static cl::opt<int> SplitLimit("split-limit",
55                               cl::init(-1), cl::Hidden);
56
57static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
58
59static cl::opt<bool> EnableFastSpilling("fast-spill",
60                                        cl::init(false), cl::Hidden);
61
62STATISTIC(numIntervals, "Number of original intervals");
63STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
64STATISTIC(numSplits   , "Number of intervals split");
65
66char LiveIntervals::ID = 0;
67static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
68
69void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
70  AU.setPreservesCFG();
71  AU.addRequired<AliasAnalysis>();
72  AU.addPreserved<AliasAnalysis>();
73  AU.addPreserved<LiveVariables>();
74  AU.addRequired<LiveVariables>();
75  AU.addPreservedID(MachineLoopInfoID);
76  AU.addPreservedID(MachineDominatorsID);
77
78  if (!StrongPHIElim) {
79    AU.addPreservedID(PHIEliminationID);
80    AU.addRequiredID(PHIEliminationID);
81  }
82
83  AU.addRequiredID(TwoAddressInstructionPassID);
84  MachineFunctionPass::getAnalysisUsage(AU);
85}
86
87void LiveIntervals::releaseMemory() {
88  // Free the live intervals themselves.
89  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
90       E = r2iMap_.end(); I != E; ++I)
91    delete I->second;
92
93  MBB2IdxMap.clear();
94  Idx2MBBMap.clear();
95  mi2iMap_.clear();
96  i2miMap_.clear();
97  r2iMap_.clear();
98  terminatorGaps.clear();
99
100  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
101  VNInfoAllocator.Reset();
102  while (!ClonedMIs.empty()) {
103    MachineInstr *MI = ClonedMIs.back();
104    ClonedMIs.pop_back();
105    mf_->DeleteMachineInstr(MI);
106  }
107}
108
109/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
110/// there is one implicit_def for each use. Add isUndef marker to
111/// implicit_def defs and their uses.
112void LiveIntervals::processImplicitDefs() {
113  SmallSet<unsigned, 8> ImpDefRegs;
114  SmallVector<MachineInstr*, 8> ImpDefMIs;
115  MachineBasicBlock *Entry = mf_->begin();
116  SmallPtrSet<MachineBasicBlock*,16> Visited;
117  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
118         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
119       DFI != E; ++DFI) {
120    MachineBasicBlock *MBB = *DFI;
121    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
122         I != E; ) {
123      MachineInstr *MI = &*I;
124      ++I;
125      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
126        unsigned Reg = MI->getOperand(0).getReg();
127        ImpDefRegs.insert(Reg);
128        ImpDefMIs.push_back(MI);
129        continue;
130      }
131
132      bool ChangedToImpDef = false;
133      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
134        MachineOperand& MO = MI->getOperand(i);
135        if (!MO.isReg() || !MO.isUse())
136          continue;
137        unsigned Reg = MO.getReg();
138        if (!Reg)
139          continue;
140        if (!ImpDefRegs.count(Reg))
141          continue;
142        // Use is a copy, just turn it into an implicit_def.
143        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
144        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
145            Reg == SrcReg) {
146          bool isKill = MO.isKill();
147          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
148          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
149            MI->RemoveOperand(j);
150          if (isKill)
151            ImpDefRegs.erase(Reg);
152          ChangedToImpDef = true;
153          break;
154        }
155
156        MO.setIsUndef();
157        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
158          ImpDefRegs.erase(Reg);
159      }
160
161      if (ChangedToImpDef) {
162        // Backtrack to process this new implicit_def.
163        --I;
164      } else {
165        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
166          MachineOperand& MO = MI->getOperand(i);
167          if (!MO.isReg() || !MO.isDef())
168            continue;
169          ImpDefRegs.erase(MO.getReg());
170        }
171      }
172    }
173
174    // Any outstanding liveout implicit_def's?
175    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
176      MachineInstr *MI = ImpDefMIs[i];
177      unsigned Reg = MI->getOperand(0).getReg();
178      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
179          !ImpDefRegs.count(Reg)) {
180        // Delete all "local" implicit_def's. That include those which define
181        // physical registers since they cannot be liveout.
182        MI->eraseFromParent();
183        continue;
184      }
185
186      // If there are multiple defs of the same register and at least one
187      // is not an implicit_def, do not insert implicit_def's before the
188      // uses.
189      bool Skip = false;
190      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
191             DE = mri_->def_end(); DI != DE; ++DI) {
192        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
193          Skip = true;
194          break;
195        }
196      }
197      if (Skip)
198        continue;
199
200      // The only implicit_def which we want to keep are those that are live
201      // out of its block.
202      MI->eraseFromParent();
203
204      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
205             UE = mri_->use_end(); UI != UE; ) {
206        MachineOperand &RMO = UI.getOperand();
207        MachineInstr *RMI = &*UI;
208        ++UI;
209        MachineBasicBlock *RMBB = RMI->getParent();
210        if (RMBB == MBB)
211          continue;
212
213        // Turn a copy use into an implicit_def.
214        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
215        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
216            Reg == SrcReg) {
217          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
218          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
219            RMI->RemoveOperand(j);
220          continue;
221        }
222
223        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
224        unsigned NewVReg = mri_->createVirtualRegister(RC);
225        RMO.setReg(NewVReg);
226        RMO.setIsUndef();
227        RMO.setIsKill();
228      }
229    }
230    ImpDefRegs.clear();
231    ImpDefMIs.clear();
232  }
233}
234
235void LiveIntervals::computeNumbering() {
236  Index2MiMap OldI2MI = i2miMap_;
237  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
238
239  Idx2MBBMap.clear();
240  MBB2IdxMap.clear();
241  mi2iMap_.clear();
242  i2miMap_.clear();
243  terminatorGaps.clear();
244
245  FunctionSize = 0;
246
247  // Number MachineInstrs and MachineBasicBlocks.
248  // Initialize MBB indexes to a sentinal.
249  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
250
251  unsigned MIIndex = 0;
252  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
253       MBB != E; ++MBB) {
254    unsigned StartIdx = MIIndex;
255
256    // Insert an empty slot at the beginning of each block.
257    MIIndex += InstrSlots::NUM;
258    i2miMap_.push_back(0);
259
260    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
261         I != E; ++I) {
262
263      if (I == MBB->getFirstTerminator()) {
264        // Leave a gap for before terminators, this is where we will point
265        // PHI kills.
266        bool inserted =
267          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
268        assert(inserted &&
269               "Multiple 'first' terminators encountered during numbering.");
270        inserted = inserted; // Avoid compiler warning if assertions turned off.
271        i2miMap_.push_back(0);
272
273        MIIndex += InstrSlots::NUM;
274      }
275
276      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
277      assert(inserted && "multiple MachineInstr -> index mappings");
278      inserted = true;
279      i2miMap_.push_back(I);
280      MIIndex += InstrSlots::NUM;
281      FunctionSize++;
282
283      // Insert max(1, numdefs) empty slots after every instruction.
284      unsigned Slots = I->getDesc().getNumDefs();
285      if (Slots == 0)
286        Slots = 1;
287      MIIndex += InstrSlots::NUM * Slots;
288      while (Slots--)
289        i2miMap_.push_back(0);
290    }
291
292    if (MBB->getFirstTerminator() == MBB->end()) {
293      // Leave a gap for before terminators, this is where we will point
294      // PHI kills.
295      bool inserted =
296        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
297      assert(inserted &&
298             "Multiple 'first' terminators encountered during numbering.");
299      inserted = inserted; // Avoid compiler warning if assertions turned off.
300      i2miMap_.push_back(0);
301
302      MIIndex += InstrSlots::NUM;
303    }
304
305    // Set the MBB2IdxMap entry for this MBB.
306    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
307    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
308  }
309
310  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
311
312  if (!OldI2MI.empty())
313    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
314      for (LiveInterval::iterator LI = OI->second->begin(),
315           LE = OI->second->end(); LI != LE; ++LI) {
316
317        // Remap the start index of the live range to the corresponding new
318        // number, or our best guess at what it _should_ correspond to if the
319        // original instruction has been erased.  This is either the following
320        // instruction or its predecessor.
321        unsigned index = LI->start / InstrSlots::NUM;
322        unsigned offset = LI->start % InstrSlots::NUM;
323        if (offset == InstrSlots::LOAD) {
324          std::vector<IdxMBBPair>::const_iterator I =
325                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
326          // Take the pair containing the index
327          std::vector<IdxMBBPair>::const_iterator J =
328                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
329
330          LI->start = getMBBStartIdx(J->second);
331        } else {
332          LI->start = mi2iMap_[OldI2MI[index]] + offset;
333        }
334
335        // Remap the ending index in the same way that we remapped the start,
336        // except for the final step where we always map to the immediately
337        // following instruction.
338        index = (LI->end - 1) / InstrSlots::NUM;
339        offset  = LI->end % InstrSlots::NUM;
340        if (offset == InstrSlots::LOAD) {
341          // VReg dies at end of block.
342          std::vector<IdxMBBPair>::const_iterator I =
343                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
344          --I;
345
346          LI->end = getMBBEndIdx(I->second) + 1;
347        } else {
348          unsigned idx = index;
349          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
350
351          if (index != OldI2MI.size())
352            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
353          else
354            LI->end = InstrSlots::NUM * i2miMap_.size();
355        }
356      }
357
358      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
359           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
360        VNInfo* vni = *VNI;
361
362        // Remap the VNInfo def index, which works the same as the
363        // start indices above. VN's with special sentinel defs
364        // don't need to be remapped.
365        if (vni->isDefAccurate() && !vni->isUnused()) {
366          unsigned index = vni->def / InstrSlots::NUM;
367          unsigned offset = vni->def % InstrSlots::NUM;
368          if (offset == InstrSlots::LOAD) {
369            std::vector<IdxMBBPair>::const_iterator I =
370                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
371            // Take the pair containing the index
372            std::vector<IdxMBBPair>::const_iterator J =
373                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
374
375            vni->def = getMBBStartIdx(J->second);
376          } else {
377            vni->def = mi2iMap_[OldI2MI[index]] + offset;
378          }
379        }
380
381        // Remap the VNInfo kill indices, which works the same as
382        // the end indices above.
383        for (size_t i = 0; i < vni->kills.size(); ++i) {
384          unsigned killIdx = vni->kills[i].killIdx;
385
386          unsigned index = (killIdx - 1) / InstrSlots::NUM;
387          unsigned offset = killIdx % InstrSlots::NUM;
388
389          if (offset == InstrSlots::LOAD) {
390            assert("Value killed at a load slot.");
391            /*std::vector<IdxMBBPair>::const_iterator I =
392             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
393            --I;
394
395            vni->kills[i] = getMBBEndIdx(I->second);*/
396          } else {
397            if (vni->kills[i].isPHIKill) {
398              std::vector<IdxMBBPair>::const_iterator I =
399                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
400              --I;
401              vni->kills[i].killIdx = terminatorGaps[I->second];
402            } else {
403              assert(OldI2MI[index] != 0 &&
404                     "Kill refers to instruction not present in index maps.");
405              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
406            }
407
408            /*
409            unsigned idx = index;
410            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
411
412            if (index != OldI2MI.size())
413              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
414                              (idx == index ? offset : 0);
415            else
416              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
417            */
418          }
419        }
420      }
421    }
422}
423
424void LiveIntervals::scaleNumbering(int factor) {
425  // Need to
426  //  * scale MBB begin and end points
427  //  * scale all ranges.
428  //  * Update VNI structures.
429  //  * Scale instruction numberings
430
431  // Scale the MBB indices.
432  Idx2MBBMap.clear();
433  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
434       MBB != MBBE; ++MBB) {
435    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
436    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
437    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
438    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
439  }
440  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
441
442  // Scale terminator gaps.
443  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
444       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
445       TGI != TGE; ++TGI) {
446    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
447  }
448
449  // Scale the intervals.
450  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
451    LI->second->scaleNumbering(factor);
452  }
453
454  // Scale MachineInstrs.
455  Mi2IndexMap oldmi2iMap = mi2iMap_;
456  unsigned highestSlot = 0;
457  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
458       MI != ME; ++MI) {
459    unsigned newSlot = InstrSlots::scale(MI->second, factor);
460    mi2iMap_[MI->first] = newSlot;
461    highestSlot = std::max(highestSlot, newSlot);
462  }
463
464  i2miMap_.clear();
465  i2miMap_.resize(highestSlot + 1);
466  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
467       MI != ME; ++MI) {
468    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
469  }
470
471}
472
473
474/// runOnMachineFunction - Register allocate the whole function
475///
476bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
477  mf_ = &fn;
478  mri_ = &mf_->getRegInfo();
479  tm_ = &fn.getTarget();
480  tri_ = tm_->getRegisterInfo();
481  tii_ = tm_->getInstrInfo();
482  aa_ = &getAnalysis<AliasAnalysis>();
483  lv_ = &getAnalysis<LiveVariables>();
484  allocatableRegs_ = tri_->getAllocatableSet(fn);
485
486  processImplicitDefs();
487  computeNumbering();
488  computeIntervals();
489
490  numIntervals += getNumIntervals();
491
492  DEBUG(dump());
493  return true;
494}
495
496/// print - Implement the dump method.
497void LiveIntervals::print(std::ostream &O, const Module* ) const {
498  O << "********** INTERVALS **********\n";
499  for (const_iterator I = begin(), E = end(); I != E; ++I) {
500    I->second->print(O, tri_);
501    O << "\n";
502  }
503
504  O << "********** MACHINEINSTRS **********\n";
505  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
506       mbbi != mbbe; ++mbbi) {
507    O << ((Value*)mbbi->getBasicBlock())->getNameStr() << ":\n";
508    for (MachineBasicBlock::iterator mii = mbbi->begin(),
509           mie = mbbi->end(); mii != mie; ++mii) {
510      O << getInstructionIndex(mii) << '\t' << *mii;
511    }
512  }
513}
514
515/// conflictsWithPhysRegDef - Returns true if the specified register
516/// is defined during the duration of the specified interval.
517bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
518                                            VirtRegMap &vrm, unsigned reg) {
519  for (LiveInterval::Ranges::const_iterator
520         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
521    for (unsigned index = getBaseIndex(I->start),
522           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
523         index += InstrSlots::NUM) {
524      // skip deleted instructions
525      while (index != end && !getInstructionFromIndex(index))
526        index += InstrSlots::NUM;
527      if (index == end) break;
528
529      MachineInstr *MI = getInstructionFromIndex(index);
530      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
531      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
532        if (SrcReg == li.reg || DstReg == li.reg)
533          continue;
534      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
535        MachineOperand& mop = MI->getOperand(i);
536        if (!mop.isReg())
537          continue;
538        unsigned PhysReg = mop.getReg();
539        if (PhysReg == 0 || PhysReg == li.reg)
540          continue;
541        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
542          if (!vrm.hasPhys(PhysReg))
543            continue;
544          PhysReg = vrm.getPhys(PhysReg);
545        }
546        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
547          return true;
548      }
549    }
550  }
551
552  return false;
553}
554
555/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except
556/// it can check use as well.
557bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
558                                            unsigned Reg, bool CheckUse,
559                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
560  for (LiveInterval::Ranges::const_iterator
561         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
562    for (unsigned index = getBaseIndex(I->start),
563           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
564         index += InstrSlots::NUM) {
565      // Skip deleted instructions.
566      MachineInstr *MI = 0;
567      while (index != end) {
568        MI = getInstructionFromIndex(index);
569        if (MI)
570          break;
571        index += InstrSlots::NUM;
572      }
573      if (index == end) break;
574
575      if (JoinedCopies.count(MI))
576        continue;
577      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
578        MachineOperand& MO = MI->getOperand(i);
579        if (!MO.isReg())
580          continue;
581        if (MO.isUse() && !CheckUse)
582          continue;
583        unsigned PhysReg = MO.getReg();
584        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
585          continue;
586        if (tri_->isSubRegister(Reg, PhysReg))
587          return true;
588      }
589    }
590  }
591
592  return false;
593}
594
595
596void LiveIntervals::printRegName(unsigned reg) const {
597  if (TargetRegisterInfo::isPhysicalRegister(reg))
598    errs() << tri_->getName(reg);
599  else
600    errs() << "%reg" << reg;
601}
602
603void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
604                                             MachineBasicBlock::iterator mi,
605                                             unsigned MIIdx, MachineOperand& MO,
606                                             unsigned MOIdx,
607                                             LiveInterval &interval) {
608  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
609
610  // Virtual registers may be defined multiple times (due to phi
611  // elimination and 2-addr elimination).  Much of what we do only has to be
612  // done once for the vreg.  We use an empty interval to detect the first
613  // time we see a vreg.
614  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
615  if (interval.empty()) {
616    // Get the Idx of the defining instructions.
617    unsigned defIndex = getDefIndex(MIIdx);
618    // Earlyclobbers move back one.
619    if (MO.isEarlyClobber())
620      defIndex = getUseIndex(MIIdx);
621    VNInfo *ValNo;
622    MachineInstr *CopyMI = NULL;
623    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
624    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
625        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
626        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
627        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
628      CopyMI = mi;
629    // Earlyclobbers move back one.
630    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
631
632    assert(ValNo->id == 0 && "First value in interval is not 0?");
633
634    // Loop over all of the blocks that the vreg is defined in.  There are
635    // two cases we have to handle here.  The most common case is a vreg
636    // whose lifetime is contained within a basic block.  In this case there
637    // will be a single kill, in MBB, which comes after the definition.
638    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
639      // FIXME: what about dead vars?
640      unsigned killIdx;
641      if (vi.Kills[0] != mi)
642        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
643      else
644        killIdx = defIndex+1;
645
646      // If the kill happens after the definition, we have an intra-block
647      // live range.
648      if (killIdx > defIndex) {
649        assert(vi.AliveBlocks.empty() &&
650               "Shouldn't be alive across any blocks!");
651        LiveRange LR(defIndex, killIdx, ValNo);
652        interval.addRange(LR);
653        DOUT << " +" << LR << "\n";
654        interval.addKill(ValNo, killIdx, false);
655        return;
656      }
657    }
658
659    // The other case we handle is when a virtual register lives to the end
660    // of the defining block, potentially live across some blocks, then is
661    // live into some number of blocks, but gets killed.  Start by adding a
662    // range that goes from this definition to the end of the defining block.
663    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
664    DOUT << " +" << NewLR;
665    interval.addRange(NewLR);
666
667    // Iterate over all of the blocks that the variable is completely
668    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
669    // live interval.
670    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
671             E = vi.AliveBlocks.end(); I != E; ++I) {
672      LiveRange LR(getMBBStartIdx(*I),
673                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
674                   ValNo);
675      interval.addRange(LR);
676      DOUT << " +" << LR;
677    }
678
679    // Finally, this virtual register is live from the start of any killing
680    // block to the 'use' slot of the killing instruction.
681    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
682      MachineInstr *Kill = vi.Kills[i];
683      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
684      LiveRange LR(getMBBStartIdx(Kill->getParent()),
685                   killIdx, ValNo);
686      interval.addRange(LR);
687      interval.addKill(ValNo, killIdx, false);
688      DOUT << " +" << LR;
689    }
690
691  } else {
692    // If this is the second time we see a virtual register definition, it
693    // must be due to phi elimination or two addr elimination.  If this is
694    // the result of two address elimination, then the vreg is one of the
695    // def-and-use register operand.
696    if (mi->isRegTiedToUseOperand(MOIdx)) {
697      // If this is a two-address definition, then we have already processed
698      // the live range.  The only problem is that we didn't realize there
699      // are actually two values in the live interval.  Because of this we
700      // need to take the LiveRegion that defines this register and split it
701      // into two values.
702      assert(interval.containsOneValue());
703      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
704      unsigned RedefIndex = getDefIndex(MIIdx);
705      if (MO.isEarlyClobber())
706        RedefIndex = getUseIndex(MIIdx);
707
708      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
709      VNInfo *OldValNo = OldLR->valno;
710
711      // Delete the initial value, which should be short and continuous,
712      // because the 2-addr copy must be in the same MBB as the redef.
713      interval.removeRange(DefIndex, RedefIndex);
714
715      // Two-address vregs should always only be redefined once.  This means
716      // that at this point, there should be exactly one value number in it.
717      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
718
719      // The new value number (#1) is defined by the instruction we claimed
720      // defined value #0.
721      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
722                                            false, // update at *
723                                            VNInfoAllocator);
724      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
725
726      // Value#0 is now defined by the 2-addr instruction.
727      OldValNo->def  = RedefIndex;
728      OldValNo->copy = 0;
729      if (MO.isEarlyClobber())
730        OldValNo->setHasRedefByEC(true);
731
732      // Add the new live interval which replaces the range for the input copy.
733      LiveRange LR(DefIndex, RedefIndex, ValNo);
734      DOUT << " replace range with " << LR;
735      interval.addRange(LR);
736      interval.addKill(ValNo, RedefIndex, false);
737
738      // If this redefinition is dead, we need to add a dummy unit live
739      // range covering the def slot.
740      if (MO.isDead())
741        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
742
743      DOUT << " RESULT: ";
744      interval.print(DOUT, tri_);
745
746    } else {
747      // Otherwise, this must be because of phi elimination.  If this is the
748      // first redefinition of the vreg that we have seen, go back and change
749      // the live range in the PHI block to be a different value number.
750      if (interval.containsOneValue()) {
751        assert(vi.Kills.size() == 1 &&
752               "PHI elimination vreg should have one kill, the PHI itself!");
753
754        // Remove the old range that we now know has an incorrect number.
755        VNInfo *VNI = interval.getValNumInfo(0);
756        MachineInstr *Killer = vi.Kills[0];
757        unsigned Start = getMBBStartIdx(Killer->getParent());
758        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
759        DOUT << " Removing [" << Start << "," << End << "] from: ";
760        interval.print(DOUT, tri_); DOUT << "\n";
761        interval.removeRange(Start, End);
762        assert(interval.ranges.size() == 1 &&
763               "newly discovered PHI interval has >1 ranges.");
764        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
765        interval.addKill(VNI, terminatorGaps[killMBB], true);
766        VNI->setHasPHIKill(true);
767        DOUT << " RESULT: "; interval.print(DOUT, tri_);
768
769        // Replace the interval with one of a NEW value number.  Note that this
770        // value number isn't actually defined by an instruction, weird huh? :)
771        LiveRange LR(Start, End,
772          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
773        LR.valno->setIsPHIDef(true);
774        DOUT << " replace range with " << LR;
775        interval.addRange(LR);
776        interval.addKill(LR.valno, End, false);
777        DOUT << " RESULT: "; interval.print(DOUT, tri_);
778      }
779
780      // In the case of PHI elimination, each variable definition is only
781      // live until the end of the block.  We've already taken care of the
782      // rest of the live range.
783      unsigned defIndex = getDefIndex(MIIdx);
784      if (MO.isEarlyClobber())
785        defIndex = getUseIndex(MIIdx);
786
787      VNInfo *ValNo;
788      MachineInstr *CopyMI = NULL;
789      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
790      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
791          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
792          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
793          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
794        CopyMI = mi;
795      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
796
797      unsigned killIndex = getMBBEndIdx(mbb) + 1;
798      LiveRange LR(defIndex, killIndex, ValNo);
799      interval.addRange(LR);
800      interval.addKill(ValNo, terminatorGaps[mbb], true);
801      ValNo->setHasPHIKill(true);
802      DOUT << " +" << LR;
803    }
804  }
805
806  DOUT << '\n';
807}
808
809void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
810                                              MachineBasicBlock::iterator mi,
811                                              unsigned MIIdx,
812                                              MachineOperand& MO,
813                                              LiveInterval &interval,
814                                              MachineInstr *CopyMI) {
815  // A physical register cannot be live across basic block, so its
816  // lifetime must end somewhere in its defining basic block.
817  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
818
819  unsigned baseIndex = MIIdx;
820  unsigned start = getDefIndex(baseIndex);
821  // Earlyclobbers move back one.
822  if (MO.isEarlyClobber())
823    start = getUseIndex(MIIdx);
824  unsigned end = start;
825
826  // If it is not used after definition, it is considered dead at
827  // the instruction defining it. Hence its interval is:
828  // [defSlot(def), defSlot(def)+1)
829  if (MO.isDead()) {
830    DOUT << " dead";
831    end = start + 1;
832    goto exit;
833  }
834
835  // If it is not dead on definition, it must be killed by a
836  // subsequent instruction. Hence its interval is:
837  // [defSlot(def), useSlot(kill)+1)
838  baseIndex += InstrSlots::NUM;
839  while (++mi != MBB->end()) {
840    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
841           getInstructionFromIndex(baseIndex) == 0)
842      baseIndex += InstrSlots::NUM;
843    if (mi->killsRegister(interval.reg, tri_)) {
844      DOUT << " killed";
845      end = getUseIndex(baseIndex) + 1;
846      goto exit;
847    } else {
848      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
849      if (DefIdx != -1) {
850        if (mi->isRegTiedToUseOperand(DefIdx)) {
851          // Two-address instruction.
852          end = getDefIndex(baseIndex);
853          if (mi->getOperand(DefIdx).isEarlyClobber())
854            end = getUseIndex(baseIndex);
855        } else {
856          // Another instruction redefines the register before it is ever read.
857          // Then the register is essentially dead at the instruction that defines
858          // it. Hence its interval is:
859          // [defSlot(def), defSlot(def)+1)
860          DOUT << " dead";
861          end = start + 1;
862        }
863        goto exit;
864      }
865    }
866
867    baseIndex += InstrSlots::NUM;
868  }
869
870  // The only case we should have a dead physreg here without a killing or
871  // instruction where we know it's dead is if it is live-in to the function
872  // and never used. Another possible case is the implicit use of the
873  // physical register has been deleted by two-address pass.
874  end = start + 1;
875
876exit:
877  assert(start < end && "did not find end of interval?");
878
879  // Already exists? Extend old live interval.
880  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
881  bool Extend = OldLR != interval.end();
882  VNInfo *ValNo = Extend
883    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
884  if (MO.isEarlyClobber() && Extend)
885    ValNo->setHasRedefByEC(true);
886  LiveRange LR(start, end, ValNo);
887  interval.addRange(LR);
888  interval.addKill(LR.valno, end, false);
889  DOUT << " +" << LR << '\n';
890}
891
892void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
893                                      MachineBasicBlock::iterator MI,
894                                      unsigned MIIdx,
895                                      MachineOperand& MO,
896                                      unsigned MOIdx) {
897  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
898    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
899                             getOrCreateInterval(MO.getReg()));
900  else if (allocatableRegs_[MO.getReg()]) {
901    MachineInstr *CopyMI = NULL;
902    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
903    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
904        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
905        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
906        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
907      CopyMI = MI;
908    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
909                              getOrCreateInterval(MO.getReg()), CopyMI);
910    // Def of a register also defines its sub-registers.
911    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
912      // If MI also modifies the sub-register explicitly, avoid processing it
913      // more than once. Do not pass in TRI here so it checks for exact match.
914      if (!MI->modifiesRegister(*AS))
915        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
916                                  getOrCreateInterval(*AS), 0);
917  }
918}
919
920void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
921                                         unsigned MIIdx,
922                                         LiveInterval &interval, bool isAlias) {
923  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
924
925  // Look for kills, if it reaches a def before it's killed, then it shouldn't
926  // be considered a livein.
927  MachineBasicBlock::iterator mi = MBB->begin();
928  unsigned baseIndex = MIIdx;
929  unsigned start = baseIndex;
930  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
931         getInstructionFromIndex(baseIndex) == 0)
932    baseIndex += InstrSlots::NUM;
933  unsigned end = baseIndex;
934  bool SeenDefUse = false;
935
936  while (mi != MBB->end()) {
937    if (mi->killsRegister(interval.reg, tri_)) {
938      DOUT << " killed";
939      end = getUseIndex(baseIndex) + 1;
940      SeenDefUse = true;
941      break;
942    } else if (mi->modifiesRegister(interval.reg, tri_)) {
943      // Another instruction redefines the register before it is ever read.
944      // Then the register is essentially dead at the instruction that defines
945      // it. Hence its interval is:
946      // [defSlot(def), defSlot(def)+1)
947      DOUT << " dead";
948      end = getDefIndex(start) + 1;
949      SeenDefUse = true;
950      break;
951    }
952
953    baseIndex += InstrSlots::NUM;
954    ++mi;
955    if (mi != MBB->end()) {
956      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
957             getInstructionFromIndex(baseIndex) == 0)
958        baseIndex += InstrSlots::NUM;
959    }
960  }
961
962  // Live-in register might not be used at all.
963  if (!SeenDefUse) {
964    if (isAlias) {
965      DOUT << " dead";
966      end = getDefIndex(MIIdx) + 1;
967    } else {
968      DOUT << " live through";
969      end = baseIndex;
970    }
971  }
972
973  VNInfo *vni =
974    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
975  vni->setIsPHIDef(true);
976  LiveRange LR(start, end, vni);
977
978  interval.addRange(LR);
979  interval.addKill(LR.valno, end, false);
980  DOUT << " +" << LR << '\n';
981}
982
983/// computeIntervals - computes the live intervals for virtual
984/// registers. for some ordering of the machine instructions [1,N] a
985/// live interval is an interval [i, j) where 1 <= i <= j < N for
986/// which a variable is live
987void LiveIntervals::computeIntervals() {
988
989  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
990        << "********** Function: "
991        << ((Value*)mf_->getFunction())->getName() << '\n');
992
993  SmallVector<unsigned, 8> UndefUses;
994  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
995       MBBI != E; ++MBBI) {
996    MachineBasicBlock *MBB = MBBI;
997    // Track the index of the current machine instr.
998    unsigned MIIndex = getMBBStartIdx(MBB);
999    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1000
1001    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1002
1003    // Create intervals for live-ins to this BB first.
1004    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1005           LE = MBB->livein_end(); LI != LE; ++LI) {
1006      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1007      // Multiple live-ins can alias the same register.
1008      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1009        if (!hasInterval(*AS))
1010          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1011                               true);
1012    }
1013
1014    // Skip over empty initial indices.
1015    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1016           getInstructionFromIndex(MIIndex) == 0)
1017      MIIndex += InstrSlots::NUM;
1018
1019    for (; MI != miEnd; ++MI) {
1020      DOUT << MIIndex << "\t" << *MI;
1021
1022      // Handle defs.
1023      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1024        MachineOperand &MO = MI->getOperand(i);
1025        if (!MO.isReg() || !MO.getReg())
1026          continue;
1027
1028        // handle register defs - build intervals
1029        if (MO.isDef())
1030          handleRegisterDef(MBB, MI, MIIndex, MO, i);
1031        else if (MO.isUndef())
1032          UndefUses.push_back(MO.getReg());
1033      }
1034
1035      // Skip over the empty slots after each instruction.
1036      unsigned Slots = MI->getDesc().getNumDefs();
1037      if (Slots == 0)
1038        Slots = 1;
1039      MIIndex += InstrSlots::NUM * Slots;
1040
1041      // Skip over empty indices.
1042      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1043             getInstructionFromIndex(MIIndex) == 0)
1044        MIIndex += InstrSlots::NUM;
1045    }
1046  }
1047
1048  // Create empty intervals for registers defined by implicit_def's (except
1049  // for those implicit_def that define values which are liveout of their
1050  // blocks.
1051  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1052    unsigned UndefReg = UndefUses[i];
1053    (void)getOrCreateInterval(UndefReg);
1054  }
1055}
1056
1057bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
1058                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1059  std::vector<IdxMBBPair>::const_iterator I =
1060    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1061
1062  bool ResVal = false;
1063  while (I != Idx2MBBMap.end()) {
1064    if (I->first >= End)
1065      break;
1066    MBBs.push_back(I->second);
1067    ResVal = true;
1068    ++I;
1069  }
1070  return ResVal;
1071}
1072
1073bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
1074                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1075  std::vector<IdxMBBPair>::const_iterator I =
1076    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1077
1078  bool ResVal = false;
1079  while (I != Idx2MBBMap.end()) {
1080    if (I->first > End)
1081      break;
1082    MachineBasicBlock *MBB = I->second;
1083    if (getMBBEndIdx(MBB) > End)
1084      break;
1085    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1086           SE = MBB->succ_end(); SI != SE; ++SI)
1087      MBBs.push_back(*SI);
1088    ResVal = true;
1089    ++I;
1090  }
1091  return ResVal;
1092}
1093
1094LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1095  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1096  return new LiveInterval(reg, Weight);
1097}
1098
1099/// dupInterval - Duplicate a live interval. The caller is responsible for
1100/// managing the allocated memory.
1101LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1102  LiveInterval *NewLI = createInterval(li->reg);
1103  NewLI->Copy(*li, mri_, getVNInfoAllocator());
1104  return NewLI;
1105}
1106
1107/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1108/// copy field and returns the source register that defines it.
1109unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1110  if (!VNI->copy)
1111    return 0;
1112
1113  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1114    // If it's extracting out of a physical register, return the sub-register.
1115    unsigned Reg = VNI->copy->getOperand(1).getReg();
1116    if (TargetRegisterInfo::isPhysicalRegister(Reg))
1117      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
1118    return Reg;
1119  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1120             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1121    return VNI->copy->getOperand(2).getReg();
1122
1123  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1124  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
1125    return SrcReg;
1126  llvm_unreachable("Unrecognized copy instruction!");
1127  return 0;
1128}
1129
1130//===----------------------------------------------------------------------===//
1131// Register allocator hooks.
1132//
1133
1134/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1135/// allow one) virtual register operand, then its uses are implicitly using
1136/// the register. Returns the virtual register.
1137unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1138                                            MachineInstr *MI) const {
1139  unsigned RegOp = 0;
1140  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1141    MachineOperand &MO = MI->getOperand(i);
1142    if (!MO.isReg() || !MO.isUse())
1143      continue;
1144    unsigned Reg = MO.getReg();
1145    if (Reg == 0 || Reg == li.reg)
1146      continue;
1147
1148    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1149        !allocatableRegs_[Reg])
1150      continue;
1151    // FIXME: For now, only remat MI with at most one register operand.
1152    assert(!RegOp &&
1153           "Can't rematerialize instruction with multiple register operand!");
1154    RegOp = MO.getReg();
1155#ifndef NDEBUG
1156    break;
1157#endif
1158  }
1159  return RegOp;
1160}
1161
1162/// isValNoAvailableAt - Return true if the val# of the specified interval
1163/// which reaches the given instruction also reaches the specified use index.
1164bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1165                                       unsigned UseIdx) const {
1166  unsigned Index = getInstructionIndex(MI);
1167  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1168  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1169  return UI != li.end() && UI->valno == ValNo;
1170}
1171
1172/// isReMaterializable - Returns true if the definition MI of the specified
1173/// val# of the specified interval is re-materializable.
1174bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1175                                       const VNInfo *ValNo, MachineInstr *MI,
1176                                       SmallVectorImpl<LiveInterval*> &SpillIs,
1177                                       bool &isLoad) {
1178  if (DisableReMat)
1179    return false;
1180
1181  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1182    return true;
1183
1184  int FrameIdx = 0;
1185  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1186      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1187    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
1188    // this but remember this is not safe to fold into a two-address
1189    // instruction.
1190    // This is a load from fixed stack slot. It can be rematerialized.
1191    return true;
1192
1193  // If the target-specific rules don't identify an instruction as
1194  // being trivially rematerializable, use some target-independent
1195  // rules.
1196  if (!MI->getDesc().isRematerializable() ||
1197      !tii_->isTriviallyReMaterializable(MI)) {
1198    if (!EnableAggressiveRemat)
1199      return false;
1200
1201    // If the instruction accesses memory but the memoperands have been lost,
1202    // we can't analyze it.
1203    const TargetInstrDesc &TID = MI->getDesc();
1204    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1205      return false;
1206
1207    // Avoid instructions obviously unsafe for remat.
1208    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1209      return false;
1210
1211    // If the instruction accesses memory and the memory could be non-constant,
1212    // assume the instruction is not rematerializable.
1213    for (std::list<MachineMemOperand>::const_iterator
1214           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1215      const MachineMemOperand &MMO = *I;
1216      if (MMO.isVolatile() || MMO.isStore())
1217        return false;
1218      const Value *V = MMO.getValue();
1219      if (!V)
1220        return false;
1221      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1222        if (!PSV->isConstant(mf_->getFrameInfo()))
1223          return false;
1224      } else if (!aa_->pointsToConstantMemory(V))
1225        return false;
1226    }
1227
1228    // If any of the registers accessed are non-constant, conservatively assume
1229    // the instruction is not rematerializable.
1230    unsigned ImpUse = 0;
1231    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1232      const MachineOperand &MO = MI->getOperand(i);
1233      if (MO.isReg()) {
1234        unsigned Reg = MO.getReg();
1235        if (Reg == 0)
1236          continue;
1237        if (TargetRegisterInfo::isPhysicalRegister(Reg))
1238          return false;
1239
1240        // Only allow one def, and that in the first operand.
1241        if (MO.isDef() != (i == 0))
1242          return false;
1243
1244        // Only allow constant-valued registers.
1245        bool IsLiveIn = mri_->isLiveIn(Reg);
1246        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1247                                          E = mri_->def_end();
1248
1249        // For the def, it should be the only def of that register.
1250        if (MO.isDef() && (next(I) != E || IsLiveIn))
1251          return false;
1252
1253        if (MO.isUse()) {
1254          // Only allow one use other register use, as that's all the
1255          // remat mechanisms support currently.
1256          if (Reg != li.reg) {
1257            if (ImpUse == 0)
1258              ImpUse = Reg;
1259            else if (Reg != ImpUse)
1260              return false;
1261          }
1262          // For the use, there should be only one associated def.
1263          if (I != E && (next(I) != E || IsLiveIn))
1264            return false;
1265        }
1266      }
1267    }
1268  }
1269
1270  unsigned ImpUse = getReMatImplicitUse(li, MI);
1271  if (ImpUse) {
1272    const LiveInterval &ImpLi = getInterval(ImpUse);
1273    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1274           re = mri_->use_end(); ri != re; ++ri) {
1275      MachineInstr *UseMI = &*ri;
1276      unsigned UseIdx = getInstructionIndex(UseMI);
1277      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1278        continue;
1279      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1280        return false;
1281    }
1282
1283    // If a register operand of the re-materialized instruction is going to
1284    // be spilled next, then it's not legal to re-materialize this instruction.
1285    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1286      if (ImpUse == SpillIs[i]->reg)
1287        return false;
1288  }
1289  return true;
1290}
1291
1292/// isReMaterializable - Returns true if the definition MI of the specified
1293/// val# of the specified interval is re-materializable.
1294bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1295                                       const VNInfo *ValNo, MachineInstr *MI) {
1296  SmallVector<LiveInterval*, 4> Dummy1;
1297  bool Dummy2;
1298  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1299}
1300
1301/// isReMaterializable - Returns true if every definition of MI of every
1302/// val# of the specified interval is re-materializable.
1303bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1304                                       SmallVectorImpl<LiveInterval*> &SpillIs,
1305                                       bool &isLoad) {
1306  isLoad = false;
1307  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1308       i != e; ++i) {
1309    const VNInfo *VNI = *i;
1310    if (VNI->isUnused())
1311      continue; // Dead val#.
1312    // Is the def for the val# rematerializable?
1313    if (!VNI->isDefAccurate())
1314      return false;
1315    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1316    bool DefIsLoad = false;
1317    if (!ReMatDefMI ||
1318        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1319      return false;
1320    isLoad |= DefIsLoad;
1321  }
1322  return true;
1323}
1324
1325/// FilterFoldedOps - Filter out two-address use operands. Return
1326/// true if it finds any issue with the operands that ought to prevent
1327/// folding.
1328static bool FilterFoldedOps(MachineInstr *MI,
1329                            SmallVector<unsigned, 2> &Ops,
1330                            unsigned &MRInfo,
1331                            SmallVector<unsigned, 2> &FoldOps) {
1332  MRInfo = 0;
1333  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1334    unsigned OpIdx = Ops[i];
1335    MachineOperand &MO = MI->getOperand(OpIdx);
1336    // FIXME: fold subreg use.
1337    if (MO.getSubReg())
1338      return true;
1339    if (MO.isDef())
1340      MRInfo |= (unsigned)VirtRegMap::isMod;
1341    else {
1342      // Filter out two-address use operand(s).
1343      if (MI->isRegTiedToDefOperand(OpIdx)) {
1344        MRInfo = VirtRegMap::isModRef;
1345        continue;
1346      }
1347      MRInfo |= (unsigned)VirtRegMap::isRef;
1348    }
1349    FoldOps.push_back(OpIdx);
1350  }
1351  return false;
1352}
1353
1354
1355/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1356/// slot / to reg or any rematerialized load into ith operand of specified
1357/// MI. If it is successul, MI is updated with the newly created MI and
1358/// returns true.
1359bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1360                                         VirtRegMap &vrm, MachineInstr *DefMI,
1361                                         unsigned InstrIdx,
1362                                         SmallVector<unsigned, 2> &Ops,
1363                                         bool isSS, int Slot, unsigned Reg) {
1364  // If it is an implicit def instruction, just delete it.
1365  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1366    RemoveMachineInstrFromMaps(MI);
1367    vrm.RemoveMachineInstrFromMaps(MI);
1368    MI->eraseFromParent();
1369    ++numFolds;
1370    return true;
1371  }
1372
1373  // Filter the list of operand indexes that are to be folded. Abort if
1374  // any operand will prevent folding.
1375  unsigned MRInfo = 0;
1376  SmallVector<unsigned, 2> FoldOps;
1377  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1378    return false;
1379
1380  // The only time it's safe to fold into a two address instruction is when
1381  // it's folding reload and spill from / into a spill stack slot.
1382  if (DefMI && (MRInfo & VirtRegMap::isMod))
1383    return false;
1384
1385  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1386                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1387  if (fmi) {
1388    // Remember this instruction uses the spill slot.
1389    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1390
1391    // Attempt to fold the memory reference into the instruction. If
1392    // we can do this, we don't need to insert spill code.
1393    MachineBasicBlock &MBB = *MI->getParent();
1394    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1395      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1396    vrm.transferSpillPts(MI, fmi);
1397    vrm.transferRestorePts(MI, fmi);
1398    vrm.transferEmergencySpills(MI, fmi);
1399    mi2iMap_.erase(MI);
1400    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1401    mi2iMap_[fmi] = InstrIdx;
1402    MI = MBB.insert(MBB.erase(MI), fmi);
1403    ++numFolds;
1404    return true;
1405  }
1406  return false;
1407}
1408
1409/// canFoldMemoryOperand - Returns true if the specified load / store
1410/// folding is possible.
1411bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1412                                         SmallVector<unsigned, 2> &Ops,
1413                                         bool ReMat) const {
1414  // Filter the list of operand indexes that are to be folded. Abort if
1415  // any operand will prevent folding.
1416  unsigned MRInfo = 0;
1417  SmallVector<unsigned, 2> FoldOps;
1418  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1419    return false;
1420
1421  // It's only legal to remat for a use, not a def.
1422  if (ReMat && (MRInfo & VirtRegMap::isMod))
1423    return false;
1424
1425  return tii_->canFoldMemoryOperand(MI, FoldOps);
1426}
1427
1428bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1429  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1430  for (LiveInterval::Ranges::const_iterator
1431         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1432    std::vector<IdxMBBPair>::const_iterator II =
1433      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1434    if (II == Idx2MBBMap.end())
1435      continue;
1436    if (I->end > II->first)  // crossing a MBB.
1437      return false;
1438    MBBs.insert(II->second);
1439    if (MBBs.size() > 1)
1440      return false;
1441  }
1442  return true;
1443}
1444
1445/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1446/// interval on to-be re-materialized operands of MI) with new register.
1447void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1448                                       MachineInstr *MI, unsigned NewVReg,
1449                                       VirtRegMap &vrm) {
1450  // There is an implicit use. That means one of the other operand is
1451  // being remat'ed and the remat'ed instruction has li.reg as an
1452  // use operand. Make sure we rewrite that as well.
1453  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1454    MachineOperand &MO = MI->getOperand(i);
1455    if (!MO.isReg())
1456      continue;
1457    unsigned Reg = MO.getReg();
1458    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1459      continue;
1460    if (!vrm.isReMaterialized(Reg))
1461      continue;
1462    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1463    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1464    if (UseMO)
1465      UseMO->setReg(NewVReg);
1466  }
1467}
1468
1469/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1470/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1471bool LiveIntervals::
1472rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1473                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1474                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1475                 unsigned Slot, int LdSlot,
1476                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1477                 VirtRegMap &vrm,
1478                 const TargetRegisterClass* rc,
1479                 SmallVector<int, 4> &ReMatIds,
1480                 const MachineLoopInfo *loopInfo,
1481                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1482                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1483                 std::vector<LiveInterval*> &NewLIs) {
1484  bool CanFold = false;
1485 RestartInstruction:
1486  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1487    MachineOperand& mop = MI->getOperand(i);
1488    if (!mop.isReg())
1489      continue;
1490    unsigned Reg = mop.getReg();
1491    unsigned RegI = Reg;
1492    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1493      continue;
1494    if (Reg != li.reg)
1495      continue;
1496
1497    bool TryFold = !DefIsReMat;
1498    bool FoldSS = true; // Default behavior unless it's a remat.
1499    int FoldSlot = Slot;
1500    if (DefIsReMat) {
1501      // If this is the rematerializable definition MI itself and
1502      // all of its uses are rematerialized, simply delete it.
1503      if (MI == ReMatOrigDefMI && CanDelete) {
1504        DOUT << "\t\t\t\tErasing re-materlizable def: ";
1505        DOUT << MI << '\n';
1506        RemoveMachineInstrFromMaps(MI);
1507        vrm.RemoveMachineInstrFromMaps(MI);
1508        MI->eraseFromParent();
1509        break;
1510      }
1511
1512      // If def for this use can't be rematerialized, then try folding.
1513      // If def is rematerializable and it's a load, also try folding.
1514      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1515      if (isLoad) {
1516        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1517        FoldSS = isLoadSS;
1518        FoldSlot = LdSlot;
1519      }
1520    }
1521
1522    // Scan all of the operands of this instruction rewriting operands
1523    // to use NewVReg instead of li.reg as appropriate.  We do this for
1524    // two reasons:
1525    //
1526    //   1. If the instr reads the same spilled vreg multiple times, we
1527    //      want to reuse the NewVReg.
1528    //   2. If the instr is a two-addr instruction, we are required to
1529    //      keep the src/dst regs pinned.
1530    //
1531    // Keep track of whether we replace a use and/or def so that we can
1532    // create the spill interval with the appropriate range.
1533
1534    HasUse = mop.isUse();
1535    HasDef = mop.isDef();
1536    SmallVector<unsigned, 2> Ops;
1537    Ops.push_back(i);
1538    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1539      const MachineOperand &MOj = MI->getOperand(j);
1540      if (!MOj.isReg())
1541        continue;
1542      unsigned RegJ = MOj.getReg();
1543      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1544        continue;
1545      if (RegJ == RegI) {
1546        Ops.push_back(j);
1547        if (!MOj.isUndef()) {
1548          HasUse |= MOj.isUse();
1549          HasDef |= MOj.isDef();
1550        }
1551      }
1552    }
1553
1554    // Create a new virtual register for the spill interval.
1555    // Create the new register now so we can map the fold instruction
1556    // to the new register so when it is unfolded we get the correct
1557    // answer.
1558    bool CreatedNewVReg = false;
1559    if (NewVReg == 0) {
1560      NewVReg = mri_->createVirtualRegister(rc);
1561      vrm.grow();
1562      CreatedNewVReg = true;
1563    }
1564
1565    if (!TryFold)
1566      CanFold = false;
1567    else {
1568      // Do not fold load / store here if we are splitting. We'll find an
1569      // optimal point to insert a load / store later.
1570      if (!TrySplit) {
1571        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1572                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1573          // Folding the load/store can completely change the instruction in
1574          // unpredictable ways, rescan it from the beginning.
1575
1576          if (FoldSS) {
1577            // We need to give the new vreg the same stack slot as the
1578            // spilled interval.
1579            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1580          }
1581
1582          HasUse = false;
1583          HasDef = false;
1584          CanFold = false;
1585          if (isNotInMIMap(MI))
1586            break;
1587          goto RestartInstruction;
1588        }
1589      } else {
1590        // We'll try to fold it later if it's profitable.
1591        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1592      }
1593    }
1594
1595    mop.setReg(NewVReg);
1596    if (mop.isImplicit())
1597      rewriteImplicitOps(li, MI, NewVReg, vrm);
1598
1599    // Reuse NewVReg for other reads.
1600    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1601      MachineOperand &mopj = MI->getOperand(Ops[j]);
1602      mopj.setReg(NewVReg);
1603      if (mopj.isImplicit())
1604        rewriteImplicitOps(li, MI, NewVReg, vrm);
1605    }
1606
1607    if (CreatedNewVReg) {
1608      if (DefIsReMat) {
1609        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1610        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1611          // Each valnum may have its own remat id.
1612          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1613        } else {
1614          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1615        }
1616        if (!CanDelete || (HasUse && HasDef)) {
1617          // If this is a two-addr instruction then its use operands are
1618          // rematerializable but its def is not. It should be assigned a
1619          // stack slot.
1620          vrm.assignVirt2StackSlot(NewVReg, Slot);
1621        }
1622      } else {
1623        vrm.assignVirt2StackSlot(NewVReg, Slot);
1624      }
1625    } else if (HasUse && HasDef &&
1626               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1627      // If this interval hasn't been assigned a stack slot (because earlier
1628      // def is a deleted remat def), do it now.
1629      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1630      vrm.assignVirt2StackSlot(NewVReg, Slot);
1631    }
1632
1633    // Re-matting an instruction with virtual register use. Add the
1634    // register as an implicit use on the use MI.
1635    if (DefIsReMat && ImpUse)
1636      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1637
1638    // Create a new register interval for this spill / remat.
1639    LiveInterval &nI = getOrCreateInterval(NewVReg);
1640    if (CreatedNewVReg) {
1641      NewLIs.push_back(&nI);
1642      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1643      if (TrySplit)
1644        vrm.setIsSplitFromReg(NewVReg, li.reg);
1645    }
1646
1647    if (HasUse) {
1648      if (CreatedNewVReg) {
1649        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1650                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1651        DOUT << " +" << LR;
1652        nI.addRange(LR);
1653      } else {
1654        // Extend the split live interval to this def / use.
1655        unsigned End = getUseIndex(index)+1;
1656        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1657                     nI.getValNumInfo(nI.getNumValNums()-1));
1658        DOUT << " +" << LR;
1659        nI.addRange(LR);
1660      }
1661    }
1662    if (HasDef) {
1663      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1664                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1665      DOUT << " +" << LR;
1666      nI.addRange(LR);
1667    }
1668
1669    DOUT << "\t\t\t\tAdded new interval: ";
1670    nI.print(DOUT, tri_);
1671    DOUT << '\n';
1672  }
1673  return CanFold;
1674}
1675bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1676                                   const VNInfo *VNI,
1677                                   MachineBasicBlock *MBB, unsigned Idx) const {
1678  unsigned End = getMBBEndIdx(MBB);
1679  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1680    if (VNI->kills[j].isPHIKill)
1681      continue;
1682
1683    unsigned KillIdx = VNI->kills[j].killIdx;
1684    if (KillIdx > Idx && KillIdx < End)
1685      return true;
1686  }
1687  return false;
1688}
1689
1690/// RewriteInfo - Keep track of machine instrs that will be rewritten
1691/// during spilling.
1692namespace {
1693  struct RewriteInfo {
1694    unsigned Index;
1695    MachineInstr *MI;
1696    bool HasUse;
1697    bool HasDef;
1698    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1699      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1700  };
1701
1702  struct RewriteInfoCompare {
1703    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1704      return LHS.Index < RHS.Index;
1705    }
1706  };
1707}
1708
1709void LiveIntervals::
1710rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1711                    LiveInterval::Ranges::const_iterator &I,
1712                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1713                    unsigned Slot, int LdSlot,
1714                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1715                    VirtRegMap &vrm,
1716                    const TargetRegisterClass* rc,
1717                    SmallVector<int, 4> &ReMatIds,
1718                    const MachineLoopInfo *loopInfo,
1719                    BitVector &SpillMBBs,
1720                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1721                    BitVector &RestoreMBBs,
1722                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1723                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1724                    std::vector<LiveInterval*> &NewLIs) {
1725  bool AllCanFold = true;
1726  unsigned NewVReg = 0;
1727  unsigned start = getBaseIndex(I->start);
1728  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1729
1730  // First collect all the def / use in this live range that will be rewritten.
1731  // Make sure they are sorted according to instruction index.
1732  std::vector<RewriteInfo> RewriteMIs;
1733  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1734         re = mri_->reg_end(); ri != re; ) {
1735    MachineInstr *MI = &*ri;
1736    MachineOperand &O = ri.getOperand();
1737    ++ri;
1738    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1739    unsigned index = getInstructionIndex(MI);
1740    if (index < start || index >= end)
1741      continue;
1742
1743    if (O.isUndef())
1744      // Must be defined by an implicit def. It should not be spilled. Note,
1745      // this is for correctness reason. e.g.
1746      // 8   %reg1024<def> = IMPLICIT_DEF
1747      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1748      // The live range [12, 14) are not part of the r1024 live interval since
1749      // it's defined by an implicit def. It will not conflicts with live
1750      // interval of r1025. Now suppose both registers are spilled, you can
1751      // easily see a situation where both registers are reloaded before
1752      // the INSERT_SUBREG and both target registers that would overlap.
1753      continue;
1754    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1755  }
1756  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1757
1758  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1759  // Now rewrite the defs and uses.
1760  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1761    RewriteInfo &rwi = RewriteMIs[i];
1762    ++i;
1763    unsigned index = rwi.Index;
1764    bool MIHasUse = rwi.HasUse;
1765    bool MIHasDef = rwi.HasDef;
1766    MachineInstr *MI = rwi.MI;
1767    // If MI def and/or use the same register multiple times, then there
1768    // are multiple entries.
1769    unsigned NumUses = MIHasUse;
1770    while (i != e && RewriteMIs[i].MI == MI) {
1771      assert(RewriteMIs[i].Index == index);
1772      bool isUse = RewriteMIs[i].HasUse;
1773      if (isUse) ++NumUses;
1774      MIHasUse |= isUse;
1775      MIHasDef |= RewriteMIs[i].HasDef;
1776      ++i;
1777    }
1778    MachineBasicBlock *MBB = MI->getParent();
1779
1780    if (ImpUse && MI != ReMatDefMI) {
1781      // Re-matting an instruction with virtual register use. Update the
1782      // register interval's spill weight to HUGE_VALF to prevent it from
1783      // being spilled.
1784      LiveInterval &ImpLi = getInterval(ImpUse);
1785      ImpLi.weight = HUGE_VALF;
1786    }
1787
1788    unsigned MBBId = MBB->getNumber();
1789    unsigned ThisVReg = 0;
1790    if (TrySplit) {
1791      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1792      if (NVI != MBBVRegsMap.end()) {
1793        ThisVReg = NVI->second;
1794        // One common case:
1795        // x = use
1796        // ...
1797        // ...
1798        // def = ...
1799        //     = use
1800        // It's better to start a new interval to avoid artifically
1801        // extend the new interval.
1802        if (MIHasDef && !MIHasUse) {
1803          MBBVRegsMap.erase(MBB->getNumber());
1804          ThisVReg = 0;
1805        }
1806      }
1807    }
1808
1809    bool IsNew = ThisVReg == 0;
1810    if (IsNew) {
1811      // This ends the previous live interval. If all of its def / use
1812      // can be folded, give it a low spill weight.
1813      if (NewVReg && TrySplit && AllCanFold) {
1814        LiveInterval &nI = getOrCreateInterval(NewVReg);
1815        nI.weight /= 10.0F;
1816      }
1817      AllCanFold = true;
1818    }
1819    NewVReg = ThisVReg;
1820
1821    bool HasDef = false;
1822    bool HasUse = false;
1823    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1824                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1825                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1826                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1827                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1828    if (!HasDef && !HasUse)
1829      continue;
1830
1831    AllCanFold &= CanFold;
1832
1833    // Update weight of spill interval.
1834    LiveInterval &nI = getOrCreateInterval(NewVReg);
1835    if (!TrySplit) {
1836      // The spill weight is now infinity as it cannot be spilled again.
1837      nI.weight = HUGE_VALF;
1838      continue;
1839    }
1840
1841    // Keep track of the last def and first use in each MBB.
1842    if (HasDef) {
1843      if (MI != ReMatOrigDefMI || !CanDelete) {
1844        bool HasKill = false;
1845        if (!HasUse)
1846          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1847        else {
1848          // If this is a two-address code, then this index starts a new VNInfo.
1849          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1850          if (VNI)
1851            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1852        }
1853        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1854          SpillIdxes.find(MBBId);
1855        if (!HasKill) {
1856          if (SII == SpillIdxes.end()) {
1857            std::vector<SRInfo> S;
1858            S.push_back(SRInfo(index, NewVReg, true));
1859            SpillIdxes.insert(std::make_pair(MBBId, S));
1860          } else if (SII->second.back().vreg != NewVReg) {
1861            SII->second.push_back(SRInfo(index, NewVReg, true));
1862          } else if ((int)index > SII->second.back().index) {
1863            // If there is an earlier def and this is a two-address
1864            // instruction, then it's not possible to fold the store (which
1865            // would also fold the load).
1866            SRInfo &Info = SII->second.back();
1867            Info.index = index;
1868            Info.canFold = !HasUse;
1869          }
1870          SpillMBBs.set(MBBId);
1871        } else if (SII != SpillIdxes.end() &&
1872                   SII->second.back().vreg == NewVReg &&
1873                   (int)index > SII->second.back().index) {
1874          // There is an earlier def that's not killed (must be two-address).
1875          // The spill is no longer needed.
1876          SII->second.pop_back();
1877          if (SII->second.empty()) {
1878            SpillIdxes.erase(MBBId);
1879            SpillMBBs.reset(MBBId);
1880          }
1881        }
1882      }
1883    }
1884
1885    if (HasUse) {
1886      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1887        SpillIdxes.find(MBBId);
1888      if (SII != SpillIdxes.end() &&
1889          SII->second.back().vreg == NewVReg &&
1890          (int)index > SII->second.back().index)
1891        // Use(s) following the last def, it's not safe to fold the spill.
1892        SII->second.back().canFold = false;
1893      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1894        RestoreIdxes.find(MBBId);
1895      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1896        // If we are splitting live intervals, only fold if it's the first
1897        // use and there isn't another use later in the MBB.
1898        RII->second.back().canFold = false;
1899      else if (IsNew) {
1900        // Only need a reload if there isn't an earlier def / use.
1901        if (RII == RestoreIdxes.end()) {
1902          std::vector<SRInfo> Infos;
1903          Infos.push_back(SRInfo(index, NewVReg, true));
1904          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1905        } else {
1906          RII->second.push_back(SRInfo(index, NewVReg, true));
1907        }
1908        RestoreMBBs.set(MBBId);
1909      }
1910    }
1911
1912    // Update spill weight.
1913    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1914    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1915  }
1916
1917  if (NewVReg && TrySplit && AllCanFold) {
1918    // If all of its def / use can be folded, give it a low spill weight.
1919    LiveInterval &nI = getOrCreateInterval(NewVReg);
1920    nI.weight /= 10.0F;
1921  }
1922}
1923
1924bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1925                        BitVector &RestoreMBBs,
1926                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1927  if (!RestoreMBBs[Id])
1928    return false;
1929  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1930  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1931    if (Restores[i].index == index &&
1932        Restores[i].vreg == vr &&
1933        Restores[i].canFold)
1934      return true;
1935  return false;
1936}
1937
1938void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1939                        BitVector &RestoreMBBs,
1940                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1941  if (!RestoreMBBs[Id])
1942    return;
1943  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1944  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1945    if (Restores[i].index == index && Restores[i].vreg)
1946      Restores[i].index = -1;
1947}
1948
1949/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1950/// spilled and create empty intervals for their uses.
1951void
1952LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1953                                    const TargetRegisterClass* rc,
1954                                    std::vector<LiveInterval*> &NewLIs) {
1955  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1956         re = mri_->reg_end(); ri != re; ) {
1957    MachineOperand &O = ri.getOperand();
1958    MachineInstr *MI = &*ri;
1959    ++ri;
1960    if (O.isDef()) {
1961      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1962             "Register def was not rewritten?");
1963      RemoveMachineInstrFromMaps(MI);
1964      vrm.RemoveMachineInstrFromMaps(MI);
1965      MI->eraseFromParent();
1966    } else {
1967      // This must be an use of an implicit_def so it's not part of the live
1968      // interval. Create a new empty live interval for it.
1969      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1970      unsigned NewVReg = mri_->createVirtualRegister(rc);
1971      vrm.grow();
1972      vrm.setIsImplicitlyDefined(NewVReg);
1973      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1974      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1975        MachineOperand &MO = MI->getOperand(i);
1976        if (MO.isReg() && MO.getReg() == li.reg) {
1977          MO.setReg(NewVReg);
1978          MO.setIsUndef();
1979        }
1980      }
1981    }
1982  }
1983}
1984
1985std::vector<LiveInterval*> LiveIntervals::
1986addIntervalsForSpillsFast(const LiveInterval &li,
1987                          const MachineLoopInfo *loopInfo,
1988                          VirtRegMap &vrm) {
1989  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1990
1991  std::vector<LiveInterval*> added;
1992
1993  assert(li.weight != HUGE_VALF &&
1994         "attempt to spill already spilled interval!");
1995
1996  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1997  DEBUG(li.dump());
1998  DOUT << '\n';
1999
2000  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2001
2002  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2003  while (RI != mri_->reg_end()) {
2004    MachineInstr* MI = &*RI;
2005
2006    SmallVector<unsigned, 2> Indices;
2007    bool HasUse = false;
2008    bool HasDef = false;
2009
2010    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2011      MachineOperand& mop = MI->getOperand(i);
2012      if (!mop.isReg() || mop.getReg() != li.reg) continue;
2013
2014      HasUse |= MI->getOperand(i).isUse();
2015      HasDef |= MI->getOperand(i).isDef();
2016
2017      Indices.push_back(i);
2018    }
2019
2020    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2021                              Indices, true, slot, li.reg)) {
2022      unsigned NewVReg = mri_->createVirtualRegister(rc);
2023      vrm.grow();
2024      vrm.assignVirt2StackSlot(NewVReg, slot);
2025
2026      // create a new register for this spill
2027      LiveInterval &nI = getOrCreateInterval(NewVReg);
2028
2029      // the spill weight is now infinity as it
2030      // cannot be spilled again
2031      nI.weight = HUGE_VALF;
2032
2033      // Rewrite register operands to use the new vreg.
2034      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2035           E = Indices.end(); I != E; ++I) {
2036        MI->getOperand(*I).setReg(NewVReg);
2037
2038        if (MI->getOperand(*I).isUse())
2039          MI->getOperand(*I).setIsKill(true);
2040      }
2041
2042      // Fill in  the new live interval.
2043      unsigned index = getInstructionIndex(MI);
2044      if (HasUse) {
2045        LiveRange LR(getLoadIndex(index), getUseIndex(index),
2046                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2047        DOUT << " +" << LR;
2048        nI.addRange(LR);
2049        vrm.addRestorePoint(NewVReg, MI);
2050      }
2051      if (HasDef) {
2052        LiveRange LR(getDefIndex(index), getStoreIndex(index),
2053                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2054        DOUT << " +" << LR;
2055        nI.addRange(LR);
2056        vrm.addSpillPoint(NewVReg, true, MI);
2057      }
2058
2059      added.push_back(&nI);
2060
2061      DOUT << "\t\t\t\tadded new interval: ";
2062      DEBUG(nI.dump());
2063      DOUT << '\n';
2064    }
2065
2066
2067    RI = mri_->reg_begin(li.reg);
2068  }
2069
2070  return added;
2071}
2072
2073std::vector<LiveInterval*> LiveIntervals::
2074addIntervalsForSpills(const LiveInterval &li,
2075                      SmallVectorImpl<LiveInterval*> &SpillIs,
2076                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2077
2078  if (EnableFastSpilling)
2079    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2080
2081  assert(li.weight != HUGE_VALF &&
2082         "attempt to spill already spilled interval!");
2083
2084  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2085  li.print(DOUT, tri_);
2086  DOUT << '\n';
2087
2088  // Each bit specify whether a spill is required in the MBB.
2089  BitVector SpillMBBs(mf_->getNumBlockIDs());
2090  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2091  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2092  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2093  DenseMap<unsigned,unsigned> MBBVRegsMap;
2094  std::vector<LiveInterval*> NewLIs;
2095  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2096
2097  unsigned NumValNums = li.getNumValNums();
2098  SmallVector<MachineInstr*, 4> ReMatDefs;
2099  ReMatDefs.resize(NumValNums, NULL);
2100  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2101  ReMatOrigDefs.resize(NumValNums, NULL);
2102  SmallVector<int, 4> ReMatIds;
2103  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2104  BitVector ReMatDelete(NumValNums);
2105  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2106
2107  // Spilling a split live interval. It cannot be split any further. Also,
2108  // it's also guaranteed to be a single val# / range interval.
2109  if (vrm.getPreSplitReg(li.reg)) {
2110    vrm.setIsSplitFromReg(li.reg, 0);
2111    // Unset the split kill marker on the last use.
2112    unsigned KillIdx = vrm.getKillPoint(li.reg);
2113    if (KillIdx) {
2114      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2115      assert(KillMI && "Last use disappeared?");
2116      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2117      assert(KillOp != -1 && "Last use disappeared?");
2118      KillMI->getOperand(KillOp).setIsKill(false);
2119    }
2120    vrm.removeKillPoint(li.reg);
2121    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2122    Slot = vrm.getStackSlot(li.reg);
2123    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2124    MachineInstr *ReMatDefMI = DefIsReMat ?
2125      vrm.getReMaterializedMI(li.reg) : NULL;
2126    int LdSlot = 0;
2127    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2128    bool isLoad = isLoadSS ||
2129      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2130    bool IsFirstRange = true;
2131    for (LiveInterval::Ranges::const_iterator
2132           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2133      // If this is a split live interval with multiple ranges, it means there
2134      // are two-address instructions that re-defined the value. Only the
2135      // first def can be rematerialized!
2136      if (IsFirstRange) {
2137        // Note ReMatOrigDefMI has already been deleted.
2138        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2139                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2140                             false, vrm, rc, ReMatIds, loopInfo,
2141                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2142                             MBBVRegsMap, NewLIs);
2143      } else {
2144        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2145                             Slot, 0, false, false, false,
2146                             false, vrm, rc, ReMatIds, loopInfo,
2147                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2148                             MBBVRegsMap, NewLIs);
2149      }
2150      IsFirstRange = false;
2151    }
2152
2153    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2154    return NewLIs;
2155  }
2156
2157  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2158  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2159    TrySplit = false;
2160  if (TrySplit)
2161    ++numSplits;
2162  bool NeedStackSlot = false;
2163  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2164       i != e; ++i) {
2165    const VNInfo *VNI = *i;
2166    unsigned VN = VNI->id;
2167    if (VNI->isUnused())
2168      continue; // Dead val#.
2169    // Is the def for the val# rematerializable?
2170    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2171      ? getInstructionFromIndex(VNI->def) : 0;
2172    bool dummy;
2173    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2174      // Remember how to remat the def of this val#.
2175      ReMatOrigDefs[VN] = ReMatDefMI;
2176      // Original def may be modified so we have to make a copy here.
2177      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2178      ClonedMIs.push_back(Clone);
2179      ReMatDefs[VN] = Clone;
2180
2181      bool CanDelete = true;
2182      if (VNI->hasPHIKill()) {
2183        // A kill is a phi node, not all of its uses can be rematerialized.
2184        // It must not be deleted.
2185        CanDelete = false;
2186        // Need a stack slot if there is any live range where uses cannot be
2187        // rematerialized.
2188        NeedStackSlot = true;
2189      }
2190      if (CanDelete)
2191        ReMatDelete.set(VN);
2192    } else {
2193      // Need a stack slot if there is any live range where uses cannot be
2194      // rematerialized.
2195      NeedStackSlot = true;
2196    }
2197  }
2198
2199  // One stack slot per live interval.
2200  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2201    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2202      Slot = vrm.assignVirt2StackSlot(li.reg);
2203
2204    // This case only occurs when the prealloc splitter has already assigned
2205    // a stack slot to this vreg.
2206    else
2207      Slot = vrm.getStackSlot(li.reg);
2208  }
2209
2210  // Create new intervals and rewrite defs and uses.
2211  for (LiveInterval::Ranges::const_iterator
2212         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2213    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2214    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2215    bool DefIsReMat = ReMatDefMI != NULL;
2216    bool CanDelete = ReMatDelete[I->valno->id];
2217    int LdSlot = 0;
2218    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2219    bool isLoad = isLoadSS ||
2220      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2221    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2222                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2223                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2224                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2225                               MBBVRegsMap, NewLIs);
2226  }
2227
2228  // Insert spills / restores if we are splitting.
2229  if (!TrySplit) {
2230    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2231    return NewLIs;
2232  }
2233
2234  SmallPtrSet<LiveInterval*, 4> AddedKill;
2235  SmallVector<unsigned, 2> Ops;
2236  if (NeedStackSlot) {
2237    int Id = SpillMBBs.find_first();
2238    while (Id != -1) {
2239      std::vector<SRInfo> &spills = SpillIdxes[Id];
2240      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2241        int index = spills[i].index;
2242        unsigned VReg = spills[i].vreg;
2243        LiveInterval &nI = getOrCreateInterval(VReg);
2244        bool isReMat = vrm.isReMaterialized(VReg);
2245        MachineInstr *MI = getInstructionFromIndex(index);
2246        bool CanFold = false;
2247        bool FoundUse = false;
2248        Ops.clear();
2249        if (spills[i].canFold) {
2250          CanFold = true;
2251          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2252            MachineOperand &MO = MI->getOperand(j);
2253            if (!MO.isReg() || MO.getReg() != VReg)
2254              continue;
2255
2256            Ops.push_back(j);
2257            if (MO.isDef())
2258              continue;
2259            if (isReMat ||
2260                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2261                                                RestoreMBBs, RestoreIdxes))) {
2262              // MI has two-address uses of the same register. If the use
2263              // isn't the first and only use in the BB, then we can't fold
2264              // it. FIXME: Move this to rewriteInstructionsForSpills.
2265              CanFold = false;
2266              break;
2267            }
2268            FoundUse = true;
2269          }
2270        }
2271        // Fold the store into the def if possible.
2272        bool Folded = false;
2273        if (CanFold && !Ops.empty()) {
2274          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2275            Folded = true;
2276            if (FoundUse) {
2277              // Also folded uses, do not issue a load.
2278              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2279              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2280            }
2281            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2282          }
2283        }
2284
2285        // Otherwise tell the spiller to issue a spill.
2286        if (!Folded) {
2287          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2288          bool isKill = LR->end == getStoreIndex(index);
2289          if (!MI->registerDefIsDead(nI.reg))
2290            // No need to spill a dead def.
2291            vrm.addSpillPoint(VReg, isKill, MI);
2292          if (isKill)
2293            AddedKill.insert(&nI);
2294        }
2295      }
2296      Id = SpillMBBs.find_next(Id);
2297    }
2298  }
2299
2300  int Id = RestoreMBBs.find_first();
2301  while (Id != -1) {
2302    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2303    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2304      int index = restores[i].index;
2305      if (index == -1)
2306        continue;
2307      unsigned VReg = restores[i].vreg;
2308      LiveInterval &nI = getOrCreateInterval(VReg);
2309      bool isReMat = vrm.isReMaterialized(VReg);
2310      MachineInstr *MI = getInstructionFromIndex(index);
2311      bool CanFold = false;
2312      Ops.clear();
2313      if (restores[i].canFold) {
2314        CanFold = true;
2315        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2316          MachineOperand &MO = MI->getOperand(j);
2317          if (!MO.isReg() || MO.getReg() != VReg)
2318            continue;
2319
2320          if (MO.isDef()) {
2321            // If this restore were to be folded, it would have been folded
2322            // already.
2323            CanFold = false;
2324            break;
2325          }
2326          Ops.push_back(j);
2327        }
2328      }
2329
2330      // Fold the load into the use if possible.
2331      bool Folded = false;
2332      if (CanFold && !Ops.empty()) {
2333        if (!isReMat)
2334          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2335        else {
2336          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2337          int LdSlot = 0;
2338          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2339          // If the rematerializable def is a load, also try to fold it.
2340          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2341            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2342                                          Ops, isLoadSS, LdSlot, VReg);
2343          if (!Folded) {
2344            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2345            if (ImpUse) {
2346              // Re-matting an instruction with virtual register use. Add the
2347              // register as an implicit use on the use MI and update the register
2348              // interval's spill weight to HUGE_VALF to prevent it from being
2349              // spilled.
2350              LiveInterval &ImpLi = getInterval(ImpUse);
2351              ImpLi.weight = HUGE_VALF;
2352              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2353            }
2354          }
2355        }
2356      }
2357      // If folding is not possible / failed, then tell the spiller to issue a
2358      // load / rematerialization for us.
2359      if (Folded)
2360        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2361      else
2362        vrm.addRestorePoint(VReg, MI);
2363    }
2364    Id = RestoreMBBs.find_next(Id);
2365  }
2366
2367  // Finalize intervals: add kills, finalize spill weights, and filter out
2368  // dead intervals.
2369  std::vector<LiveInterval*> RetNewLIs;
2370  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2371    LiveInterval *LI = NewLIs[i];
2372    if (!LI->empty()) {
2373      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2374      if (!AddedKill.count(LI)) {
2375        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2376        unsigned LastUseIdx = getBaseIndex(LR->end);
2377        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2378        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2379        assert(UseIdx != -1);
2380        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2381          LastUse->getOperand(UseIdx).setIsKill();
2382          vrm.addKillPoint(LI->reg, LastUseIdx);
2383        }
2384      }
2385      RetNewLIs.push_back(LI);
2386    }
2387  }
2388
2389  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2390  return RetNewLIs;
2391}
2392
2393/// hasAllocatableSuperReg - Return true if the specified physical register has
2394/// any super register that's allocatable.
2395bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2396  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2397    if (allocatableRegs_[*AS] && hasInterval(*AS))
2398      return true;
2399  return false;
2400}
2401
2402/// getRepresentativeReg - Find the largest super register of the specified
2403/// physical register.
2404unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2405  // Find the largest super-register that is allocatable.
2406  unsigned BestReg = Reg;
2407  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2408    unsigned SuperReg = *AS;
2409    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2410      BestReg = SuperReg;
2411      break;
2412    }
2413  }
2414  return BestReg;
2415}
2416
2417/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2418/// specified interval that conflicts with the specified physical register.
2419unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2420                                                   unsigned PhysReg) const {
2421  unsigned NumConflicts = 0;
2422  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2423  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2424         E = mri_->reg_end(); I != E; ++I) {
2425    MachineOperand &O = I.getOperand();
2426    MachineInstr *MI = O.getParent();
2427    unsigned Index = getInstructionIndex(MI);
2428    if (pli.liveAt(Index))
2429      ++NumConflicts;
2430  }
2431  return NumConflicts;
2432}
2433
2434/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2435/// around all defs and uses of the specified interval. Return true if it
2436/// was able to cut its interval.
2437bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2438                                            unsigned PhysReg, VirtRegMap &vrm) {
2439  unsigned SpillReg = getRepresentativeReg(PhysReg);
2440
2441  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2442    // If there are registers which alias PhysReg, but which are not a
2443    // sub-register of the chosen representative super register. Assert
2444    // since we can't handle it yet.
2445    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2446           tri_->isSuperRegister(*AS, SpillReg));
2447
2448  bool Cut = false;
2449  LiveInterval &pli = getInterval(SpillReg);
2450  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2451  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2452         E = mri_->reg_end(); I != E; ++I) {
2453    MachineOperand &O = I.getOperand();
2454    MachineInstr *MI = O.getParent();
2455    if (SeenMIs.count(MI))
2456      continue;
2457    SeenMIs.insert(MI);
2458    unsigned Index = getInstructionIndex(MI);
2459    if (pli.liveAt(Index)) {
2460      vrm.addEmergencySpill(SpillReg, MI);
2461      unsigned StartIdx = getLoadIndex(Index);
2462      unsigned EndIdx = getStoreIndex(Index)+1;
2463      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2464        pli.removeRange(StartIdx, EndIdx);
2465        Cut = true;
2466      } else {
2467        std::string msg;
2468        raw_string_ostream Msg(msg);
2469        Msg << "Ran out of registers during register allocation!";
2470        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2471          Msg << "\nPlease check your inline asm statement for invalid "
2472               << "constraints:\n";
2473          MI->print(Msg, tm_);
2474        }
2475        llvm_report_error(Msg.str());
2476      }
2477      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2478        if (!hasInterval(*AS))
2479          continue;
2480        LiveInterval &spli = getInterval(*AS);
2481        if (spli.liveAt(Index))
2482          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2483      }
2484    }
2485  }
2486  return Cut;
2487}
2488
2489LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2490                                                  MachineInstr* startInst) {
2491  LiveInterval& Interval = getOrCreateInterval(reg);
2492  VNInfo* VN = Interval.getNextValue(
2493            getInstructionIndex(startInst) + InstrSlots::DEF,
2494            startInst, true, getVNInfoAllocator());
2495  VN->setHasPHIKill(true);
2496  VN->kills.push_back(
2497    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2498  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2499               getMBBEndIdx(startInst->getParent()) + 1, VN);
2500  Interval.addRange(LR);
2501
2502  return LR;
2503}
2504
2505raw_ostream &
2506IntervalPrefixPrinter::operator()(raw_ostream &out,
2507                                  const MachineInstr &instr) const {
2508      return out << liinfo.getInstructionIndex(&instr);
2509}
2510