LiveIntervalAnalysis.cpp revision 459a7c6b6ad9c4fcb9f119aa6eaaf2769b00d9b1
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
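/// For illustration only (hypothetical vregs, schematic MIR): a block like
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = ADD32rr %reg1024, %reg1026
/// is rewritten so that both the implicit def and its use carry the undef
/// marker:
///   %reg1024<def,undef> = IMPLICIT_DEF
///   %reg1025<def> = ADD32rr %reg1024<undef>, %reg1026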
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        MI->getOperand(0).setIsUndef();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        // Physical registers are not liveout (yet).
        continue;
      if (!ImpDefRegs.count(Reg))
        continue;

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;
        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        MachineInstrBuilder MIB =
          BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
                  tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
        (*MIB).getOperand(0).setIsUndef();
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      (void)inserted; // Suppress unused-variable warnings when asserts are off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
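      // Illustrative layout, assuming InstrSlots::NUM == 4 with sub-slots
      // LOAD/USE/DEF/STORE: an instruction numbered 8 owns indices [8,12),
      // and with a single def it is followed by one empty slot group
      // [12,16), so the next instruction is numbered 16.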
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I-1) : I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size() > 0) ? (I-1) : I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings
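  // For example (assuming a positive factor simply multiplies each slot
  // index): with factor == 2, an instruction at slot 8 moves to slot 16,
  // leaving unused index ranges between instructions for later insertions.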

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = MI->first;
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module*) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it checks uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
545    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
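    // Schematically (hypothetical indices, assuming the usual USE/DEF
    // sub-slot offsets of 1 and 2): a def at instruction index 4 that is
    // killed at index 8 in the same block yields the single range
    // [defSlot(4), useSlot(8)+1) = [6, 10).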
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
751  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
752
753  unsigned baseIndex = MIIdx;
754  unsigned start = getDefIndex(baseIndex);
755  // Earlyclobbers move back one.
756  if (MO.isEarlyClobber())
757    start = getUseIndex(MIIdx);
758  unsigned end = start;
759
760  // If it is not used after definition, it is considered dead at
761  // the instruction defining it. Hence its interval is:
762  // [defSlot(def), defSlot(def)+1)
763  if (MO.isDead()) {
764    DOUT << " dead";
765    end = start + 1;
766    goto exit;
767  }
768
769  // If it is not dead on definition, it must be killed by a
770  // subsequent instruction. Hence its interval is:
771  // [defSlot(def), useSlot(kill)+1)
772  baseIndex += InstrSlots::NUM;
773  while (++mi != MBB->end()) {
774    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
775           getInstructionFromIndex(baseIndex) == 0)
776      baseIndex += InstrSlots::NUM;
777    if (mi->killsRegister(interval.reg, tri_)) {
778      DOUT << " killed";
779      end = getUseIndex(baseIndex) + 1;
780      goto exit;
781    } else {
782      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
783      if (DefIdx != -1) {
784        if (mi->isRegTiedToUseOperand(DefIdx)) {
785          // Two-address instruction.
786          end = getDefIndex(baseIndex);
787          if (mi->getOperand(DefIdx).isEarlyClobber())
788            end = getUseIndex(baseIndex);
789        } else {
790          // Another instruction redefines the register before it is ever read.
791          // Then the register is essentially dead at the instruction that defines
792          // it. Hence its interval is:
793          // [defSlot(def), defSlot(def)+1)
794          DOUT << " dead";
795          end = start + 1;
796        }
797        goto exit;
798      }
799    }
800
801    baseIndex += InstrSlots::NUM;
802  }
803
  // The only case we should have a dead physreg here without a killing
  // instruction, or any other instruction where we know it's dead, is if it
  // is live-in to the function and never used. Another possible case is
  // that the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
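/// For example (hypothetical numbering, assuming defSlot(i) == i+2 and
/// useSlot(i) == i+1): a virtual register defined at instruction index 4
/// and last used at index 20 gets the interval
/// [defSlot(4), useSlot(20)+1) = [6, 22).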
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
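/// For example (hypothetical): if the remat candidate is
///   %reg1025<def> = LEA32r %reg1024, ...
/// then every rematerialized copy of the LEA also reads %reg1024, and this
/// returns %reg1024.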
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
1074           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable hook
    // determine this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If successful, MI is updated with the newly created MI
/// and the function returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
1358      return false;
1359    MBBs.insert(II->second);
1360    if (MBBs.size() > 1)
1361      return false;
1362  }
1363  return true;
1364}
1365
1366/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1367/// interval on to-be re-materialized operands of MI) with new register.
1368void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1369                                       MachineInstr *MI, unsigned NewVReg,
1370                                       VirtRegMap &vrm) {
1371  // There is an implicit use. That means one of the other operand is
1372  // being remat'ed and the remat'ed instruction has li.reg as an
1373  // use operand. Make sure we rewrite that as well.
1374  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1375    MachineOperand &MO = MI->getOperand(i);
1376    if (!MO.isReg())
1377      continue;
1378    unsigned Reg = MO.getReg();
1379    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1380      continue;
1381    if (!vrm.isReMaterialized(Reg))
1382      continue;
1383    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1384    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1385    if (UseMO)
1386      UseMO->setReg(NewVReg);
1387  }
1388}
1389
1390/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1391/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1392bool LiveIntervals::
1393rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1394                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
1395                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1396                 unsigned Slot, int LdSlot,
1397                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1398                 VirtRegMap &vrm,
1399                 const TargetRegisterClass* rc,
1400                 SmallVector<int, 4> &ReMatIds,
1401                 const MachineLoopInfo *loopInfo,
1402                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1403                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1404                 std::vector<LiveInterval*> &NewLIs) {
1405  bool CanFold = false;
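  // Folding below can replace MI with a different instruction; when that
  // happens we jump back to this label and re-scan the new operand list.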
1406 RestartInstruction:
1407  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1408    MachineOperand& mop = MI->getOperand(i);
1409    if (!mop.isReg())
1410      continue;
1411    unsigned Reg = mop.getReg();
1412    unsigned RegI = Reg;
1413    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1414      continue;
1415    if (Reg != li.reg)
1416      continue;
1417
1418    bool TryFold = !DefIsReMat;
1419    bool FoldSS = true; // Default behavior unless it's a remat.
1420    int FoldSlot = Slot;
1421    if (DefIsReMat) {
1422      // If this is the rematerializable definition MI itself and
1423      // all of its uses are rematerialized, simply delete it.
1424      if (MI == ReMatOrigDefMI && CanDelete) {
1425        DOUT << "\t\t\t\tErasing re-materializable def: ";
1426        DOUT << MI << '\n';
1427        RemoveMachineInstrFromMaps(MI);
1428        vrm.RemoveMachineInstrFromMaps(MI);
1429        MI->eraseFromParent();
1430        break;
1431      }
1432
1433      // If the def for this use can't be rematerialized, then try folding.
1434      // If the def is rematerializable and it's a load, also try folding.
1435      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1436      if (isLoad) {
1437        // Try folding loads (from stack slot, constant pool, etc.) into uses.
1438        FoldSS = isLoadSS;
1439        FoldSlot = LdSlot;
1440      }
1441    }
1442
1443    // Scan all of the operands of this instruction, rewriting operands
1444    // to use NewVReg instead of li.reg as appropriate.  We do this for
1445    // two reasons:
1446    //
1447    //   1. If the instr reads the same spilled vreg multiple times, we
1448    //      want to reuse the NewVReg.
1449    //   2. If the instr is a two-addr instruction, we are required to
1450    //      keep the src/dst regs pinned.
1451    //
1452    // Keep track of whether we replace a use and/or def so that we can
1453    // create the spill interval with the appropriate range.
1454
1455    HasUse = mop.isUse();
1456    HasDef = mop.isDef();
1457    SmallVector<unsigned, 2> Ops;
1458    Ops.push_back(i);
1459    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1460      const MachineOperand &MOj = MI->getOperand(j);
1461      if (!MOj.isReg())
1462        continue;
1463      unsigned RegJ = MOj.getReg();
1464      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1465        continue;
1466      if (RegJ == RegI) {
1467        Ops.push_back(j);
1468        HasUse |= MOj.isUse();
1469        HasDef |= MOj.isDef();
1470      }
1471    }
1472
1473    if (HasUse && !li.liveAt(getUseIndex(index)))
1474      // Must be defined by an implicit def. It should not be spilled. Note,
1475      // this is for correctness reasons. e.g.
1476      // 8   %reg1024<def> = IMPLICIT_DEF
1477      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1478      // The live range [12, 14) is not part of the r1024 live interval since
1479      // it's defined by an implicit def. It will not conflict with the live
1480      // interval of r1025. Now suppose both registers are spilled; you can
1481      // easily see a situation where both registers are reloaded before
1482      // the INSERT_SUBREG and are assigned target registers that overlap.
1483      HasUse = false;
1484
1485    // Create a new virtual register for the spill interval.
1486    // Create the new register now so we can map the fold instruction
1487    // to the new register so that when it is unfolded we get the correct
1488    // answer.
1489    bool CreatedNewVReg = false;
1490    if (NewVReg == 0) {
1491      NewVReg = mri_->createVirtualRegister(rc);
1492      vrm.grow();
1493      CreatedNewVReg = true;
1494    }
1495
1496    if (!TryFold)
1497      CanFold = false;
1498    else {
1499      // Do not fold load / store here if we are splitting. We'll find an
1500      // optimal point to insert a load / store later.
1501      if (!TrySplit) {
1502        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1503                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1504          // Folding the load/store can completely change the instruction in
1505          // unpredictable ways, rescan it from the beginning.
1506
1507          if (FoldSS) {
1508            // We need to give the new vreg the same stack slot as the
1509            // spilled interval.
1510            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1511          }
1512
1513          HasUse = false;
1514          HasDef = false;
1515          CanFold = false;
1516          if (isNotInMIMap(MI))
1517            break;
1518          goto RestartInstruction;
1519        }
1520      } else {
1521        // We'll try to fold it later if it's profitable.
1522        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1523      }
1524    }
1525
1526    mop.setReg(NewVReg);
1527    if (mop.isImplicit())
1528      rewriteImplicitOps(li, MI, NewVReg, vrm);
1529
1530    // Reuse NewVReg for other reads.
1531    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1532      MachineOperand &mopj = MI->getOperand(Ops[j]);
1533      mopj.setReg(NewVReg);
1534      if (mopj.isImplicit())
1535        rewriteImplicitOps(li, MI, NewVReg, vrm);
1536    }
1537
1538    if (CreatedNewVReg) {
1539      if (DefIsReMat) {
1540        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1541        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1542          // Each valnum may have its own remat id.
1543          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1544        } else {
1545          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1546        }
1547        if (!CanDelete || (HasUse && HasDef)) {
1548          // If this is a two-addr instruction then its use operands are
1549          // rematerializable but its def is not. It should be assigned a
1550          // stack slot.
1551          vrm.assignVirt2StackSlot(NewVReg, Slot);
1552        }
1553      } else {
1554        vrm.assignVirt2StackSlot(NewVReg, Slot);
1555      }
1556    } else if (HasUse && HasDef &&
1557               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1558      // If this interval hasn't been assigned a stack slot (because an
1559      // earlier def is a deleted remat def), do it now.
1560      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1561      vrm.assignVirt2StackSlot(NewVReg, Slot);
1562    }
1563
1564    // Re-matting an instruction with virtual register use. Add the
1565    // register as an implicit use on the use MI.
1566    if (DefIsReMat && ImpUse)
1567      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1568
1569    // Create a new register interval for this spill / remat.
1570    LiveInterval &nI = getOrCreateInterval(NewVReg);
1571    if (CreatedNewVReg) {
1572      NewLIs.push_back(&nI);
1573      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1574      if (TrySplit)
1575        vrm.setIsSplitFromReg(NewVReg, li.reg);
1576    }
1577
1578    if (HasUse) {
1579      if (CreatedNewVReg) {
1580        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1581                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1582        DOUT << " +" << LR;
1583        nI.addRange(LR);
1584      } else {
1585        // Extend the split live interval to this def / use.
1586        unsigned End = getUseIndex(index)+1;
1587        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1588                     nI.getValNumInfo(nI.getNumValNums()-1));
1589        DOUT << " +" << LR;
1590        nI.addRange(LR);
1591      }
1592    }
1593    if (HasDef) {
1594      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1595                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1596      DOUT << " +" << LR;
1597      nI.addRange(LR);
1598    }
1599
1600    DOUT << "\t\t\t\tAdded new interval: ";
1601    nI.print(DOUT, tri_);
1602    DOUT << '\n';
1603  }
1604  return CanFold;
1605}
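
/// anyKillInMBBAfterIdx - Return true if the given val# is killed within
/// MBB at an index after Idx.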
1606bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1607                                   const VNInfo *VNI,
1608                                   MachineBasicBlock *MBB, unsigned Idx) const {
1609  unsigned End = getMBBEndIdx(MBB);
1610  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1611    unsigned KillIdx = VNI->kills[j];
1612    if (KillIdx > Idx && KillIdx < End)
1613      return true;
1614  }
1615  return false;
1616}
1617
1618/// RewriteInfo - Keep track of machine instrs that will be rewritten
1619/// during spilling.
1620namespace {
1621  struct RewriteInfo {
1622    unsigned Index;
1623    MachineInstr *MI;
1624    bool HasUse;
1625    bool HasDef;
1626    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1627      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1628  };
1629
1630  struct RewriteInfoCompare {
1631    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1632      return LHS.Index < RHS.Index;
1633    }
1634  };
1635}
1636
1637void LiveIntervals::
1638rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1639                    LiveInterval::Ranges::const_iterator &I,
1640                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1641                    unsigned Slot, int LdSlot,
1642                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1643                    VirtRegMap &vrm,
1644                    const TargetRegisterClass* rc,
1645                    SmallVector<int, 4> &ReMatIds,
1646                    const MachineLoopInfo *loopInfo,
1647                    BitVector &SpillMBBs,
1648                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1649                    BitVector &RestoreMBBs,
1650                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1651                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1652                    std::vector<LiveInterval*> &NewLIs) {
1653  bool AllCanFold = true;
1654  unsigned NewVReg = 0;
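  // Compute the range of instruction indices covered by this live range,
  // rounded out to whole instruction slots.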
1655  unsigned start = getBaseIndex(I->start);
1656  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1657
1658  // First collect all the defs / uses in this live range that will be rewritten.
1659  // Make sure they are sorted according to instruction index.
1660  std::vector<RewriteInfo> RewriteMIs;
1661  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1662         re = mri_->reg_end(); ri != re; ) {
1663    MachineInstr *MI = &*ri;
1664    MachineOperand &O = ri.getOperand();
1665    ++ri;
1666    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1667    unsigned index = getInstructionIndex(MI);
1668    if (index < start || index >= end)
1669      continue;
1670    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1671      // Must be defined by an implicit def. It should not be spilled. Note,
1672      // this is for correctness reasons. e.g.
1673      // 8   %reg1024<def> = IMPLICIT_DEF
1674      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1675      // The live range [12, 14) is not part of the r1024 live interval since
1676      // it's defined by an implicit def. It will not conflict with the live
1677      // interval of r1025. Now suppose both registers are spilled; you can
1678      // easily see a situation where both registers are reloaded before
1679      // the INSERT_SUBREG and are assigned target registers that overlap.
1680      continue;
1681    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1682  }
1683  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1684
1685  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1686  // Now rewrite the defs and uses.
1687  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1688    RewriteInfo &rwi = RewriteMIs[i];
1689    ++i;
1690    unsigned index = rwi.Index;
1691    bool MIHasUse = rwi.HasUse;
1692    bool MIHasDef = rwi.HasDef;
1693    MachineInstr *MI = rwi.MI;
1694    // If MI defs and/or uses the same register multiple times, then there
1695    // are multiple entries.
1696    unsigned NumUses = MIHasUse;
1697    while (i != e && RewriteMIs[i].MI == MI) {
1698      assert(RewriteMIs[i].Index == index);
1699      bool isUse = RewriteMIs[i].HasUse;
1700      if (isUse) ++NumUses;
1701      MIHasUse |= isUse;
1702      MIHasDef |= RewriteMIs[i].HasDef;
1703      ++i;
1704    }
1705    MachineBasicBlock *MBB = MI->getParent();
1706
1707    if (ImpUse && MI != ReMatDefMI) {
1708      // Re-matting an instruction with virtual register use. Update the
1709      // register interval's spill weight to HUGE_VALF to prevent it from
1710      // being spilled.
1711      LiveInterval &ImpLi = getInterval(ImpUse);
1712      ImpLi.weight = HUGE_VALF;
1713    }
1714
1715    unsigned MBBId = MBB->getNumber();
1716    unsigned ThisVReg = 0;
1717    if (TrySplit) {
1718      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1719      if (NVI != MBBVRegsMap.end()) {
1720        ThisVReg = NVI->second;
1721        // One common case:
1722        // x = use
1723        // ...
1724        // ...
1725        // def = ...
1726        //     = use
1727        // It's better to start a new interval to avoid artificially
1728        // extending the existing one.
1729        if (MIHasDef && !MIHasUse) {
1730          MBBVRegsMap.erase(MBB->getNumber());
1731          ThisVReg = 0;
1732        }
1733      }
1734    }
1735
1736    bool IsNew = ThisVReg == 0;
1737    if (IsNew) {
1738      // This ends the previous live interval. If all of its defs / uses
1739      // can be folded, give it a low spill weight.
1740      if (NewVReg && TrySplit && AllCanFold) {
1741        LiveInterval &nI = getOrCreateInterval(NewVReg);
1742        nI.weight /= 10.0F;
1743      }
1744      AllCanFold = true;
1745    }
1746    NewVReg = ThisVReg;
1747
1748    bool HasDef = false;
1749    bool HasUse = false;
1750    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1751                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1752                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1753                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1754                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1755    if (!HasDef && !HasUse)
1756      continue;
1757
1758    AllCanFold &= CanFold;
1759
1760    // Update weight of spill interval.
1761    LiveInterval &nI = getOrCreateInterval(NewVReg);
1762    if (!TrySplit) {
1763      // The spill weight is now infinity as it cannot be spilled again.
1764      nI.weight = HUGE_VALF;
1765      continue;
1766    }
1767
1768    // Keep track of the last def and first use in each MBB.
1769    if (HasDef) {
1770      if (MI != ReMatOrigDefMI || !CanDelete) {
1771        bool HasKill = false;
1772        if (!HasUse)
1773          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1774        else {
1775          // If this is two-address code, then this index starts a new VNInfo.
1776          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1777          if (VNI)
1778            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1779        }
1780        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1781          SpillIdxes.find(MBBId);
1782        if (!HasKill) {
1783          if (SII == SpillIdxes.end()) {
1784            std::vector<SRInfo> S;
1785            S.push_back(SRInfo(index, NewVReg, true));
1786            SpillIdxes.insert(std::make_pair(MBBId, S));
1787          } else if (SII->second.back().vreg != NewVReg) {
1788            SII->second.push_back(SRInfo(index, NewVReg, true));
1789          } else if ((int)index > SII->second.back().index) {
1790            // If there is an earlier def and this is a two-address
1791            // instruction, then it's not possible to fold the store (which
1792            // would also fold the load).
1793            SRInfo &Info = SII->second.back();
1794            Info.index = index;
1795            Info.canFold = !HasUse;
1796          }
1797          SpillMBBs.set(MBBId);
1798        } else if (SII != SpillIdxes.end() &&
1799                   SII->second.back().vreg == NewVReg &&
1800                   (int)index > SII->second.back().index) {
1801          // There is an earlier def that's not killed (must be two-address).
1802          // The spill is no longer needed.
1803          SII->second.pop_back();
1804          if (SII->second.empty()) {
1805            SpillIdxes.erase(MBBId);
1806            SpillMBBs.reset(MBBId);
1807          }
1808        }
1809      }
1810    }
1811
1812    if (HasUse) {
1813      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1814        SpillIdxes.find(MBBId);
1815      if (SII != SpillIdxes.end() &&
1816          SII->second.back().vreg == NewVReg &&
1817          (int)index > SII->second.back().index)
1818        // Use(s) follow the last def; it's not safe to fold the spill.
1819        SII->second.back().canFold = false;
1820      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1821        RestoreIdxes.find(MBBId);
1822      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1823        // If we are splitting live intervals, only fold if it's the first
1824        // use and there isn't another use later in the MBB.
1825        RII->second.back().canFold = false;
1826      else if (IsNew) {
1827        // Only need a reload if there isn't an earlier def / use.
1828        if (RII == RestoreIdxes.end()) {
1829          std::vector<SRInfo> Infos;
1830          Infos.push_back(SRInfo(index, NewVReg, true));
1831          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1832        } else {
1833          RII->second.push_back(SRInfo(index, NewVReg, true));
1834        }
1835        RestoreMBBs.set(MBBId);
1836      }
1837    }
1838
1839    // Update spill weight.
1840    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1841    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1842  }
1843
1844  if (NewVReg && TrySplit && AllCanFold) {
1845    // If all of its defs / uses can be folded, give it a low spill weight.
1846    LiveInterval &nI = getOrCreateInterval(NewVReg);
1847    nI.weight /= 10.0F;
1848  }
1849}
1850
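/// alsoFoldARestore - Return true if there is a restore of vr recorded at
/// the given instruction index in block Id that is still marked foldable.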
1851bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1852                        BitVector &RestoreMBBs,
1853                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1854  if (!RestoreMBBs[Id])
1855    return false;
1856  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1857  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1858    if (Restores[i].index == index &&
1859        Restores[i].vreg == vr &&
1860        Restores[i].canFold)
1861      return true;
1862  return false;
1863}
1864
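/// eraseRestoreInfo - Invalidate any restore of vr recorded at the given
/// instruction index in block Id by setting its index to -1.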
1865void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1866                        BitVector &RestoreMBBs,
1867                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1868  if (!RestoreMBBs[Id])
1869    return;
1870  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1871  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1872    if (Restores[i].index == index && Restores[i].vreg == vr)
1873      Restores[i].index = -1;
1874}
1875
1876/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1877/// spilled and create empty intervals for their uses.
1878void
1879LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1880                                    const TargetRegisterClass* rc,
1881                                    std::vector<LiveInterval*> &NewLIs) {
1882  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1883         re = mri_->reg_end(); ri != re; ) {
1884    MachineOperand &O = ri.getOperand();
1885    MachineInstr *MI = &*ri;
1886    ++ri;
1887    if (O.isDef()) {
1888      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1889             "Register def was not rewritten?");
1890      RemoveMachineInstrFromMaps(MI);
1891      vrm.RemoveMachineInstrFromMaps(MI);
1892      MI->eraseFromParent();
1893    } else {
1894      // This must be a use of an implicit_def so it's not part of the live
1895      // interval. Create a new empty live interval for it.
1896      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1897      unsigned NewVReg = mri_->createVirtualRegister(rc);
1898      vrm.grow();
1899      vrm.setIsImplicitlyDefined(NewVReg);
1900      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1901      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1902        MachineOperand &MO = MI->getOperand(i);
1903        if (MO.isReg() && MO.getReg() == li.reg) {
1904          MO.setReg(NewVReg);
1905          MO.setIsUndef();
1906        }
1907      }
1908    }
1909  }
1910}
1911
1912std::vector<LiveInterval*> LiveIntervals::
1913addIntervalsForSpillsFast(const LiveInterval &li,
1914                          const MachineLoopInfo *loopInfo,
1915                          VirtRegMap &vrm) {
1916  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1917
1918  std::vector<LiveInterval*> added;
1919
1920  assert(li.weight != HUGE_VALF &&
1921         "attempt to spill already spilled interval!");
1922
1923  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1924  DEBUG(li.dump());
1925  DOUT << '\n';
1926
1927  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1928
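  // Process the first remaining use of li.reg on each iteration; the body
  // either folds the stack slot into the instruction or rewrites it to use
  // a fresh vreg, so the use list of li.reg shrinks until it is empty.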
1929  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1930  while (RI != mri_->reg_end()) {
1931    MachineInstr* MI = &*RI;
1932
1933    SmallVector<unsigned, 2> Indices;
1934    bool HasUse = false;
1935    bool HasDef = false;
1936
1937    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1938      MachineOperand& mop = MI->getOperand(i);
1939      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1940
1941      HasUse |= MI->getOperand(i).isUse();
1942      HasDef |= MI->getOperand(i).isDef();
1943
1944      Indices.push_back(i);
1945    }
1946
1947    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1948                              Indices, true, slot, li.reg)) {
1949      unsigned NewVReg = mri_->createVirtualRegister(rc);
1950      vrm.grow();
1951      vrm.assignVirt2StackSlot(NewVReg, slot);
1952
1953      // Create a new register for this spill.
1954      LiveInterval &nI = getOrCreateInterval(NewVReg);
1955
1956      // The spill weight is now infinity as it
1957      // cannot be spilled again.
1958      nI.weight = HUGE_VALF;
1959
1960      // Rewrite register operands to use the new vreg.
1961      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1962           E = Indices.end(); I != E; ++I) {
1963        MI->getOperand(*I).setReg(NewVReg);
1964
1965        if (MI->getOperand(*I).isUse())
1966          MI->getOperand(*I).setIsKill(true);
1967      }
1968
1969      // Fill in the new live interval.
1970      unsigned index = getInstructionIndex(MI);
1971      if (HasUse) {
1972        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1973                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1974        DOUT << " +" << LR;
1975        nI.addRange(LR);
1976        vrm.addRestorePoint(NewVReg, MI);
1977      }
1978      if (HasDef) {
1979        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1980                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1981        DOUT << " +" << LR;
1982        nI.addRange(LR);
1983        vrm.addSpillPoint(NewVReg, true, MI);
1984      }
1985
1986      added.push_back(&nI);
1987
1988      DOUT << "\t\t\t\tadded new interval: ";
1989      DEBUG(nI.dump());
1990      DOUT << '\n';
1991    }
1992
1993
1994    RI = mri_->reg_begin(li.reg);
1995  }
1996
1997  return added;
1998}
1999
2000std::vector<LiveInterval*> LiveIntervals::
2001addIntervalsForSpills(const LiveInterval &li,
2002                      SmallVectorImpl<LiveInterval*> &SpillIs,
2003                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2004
2005  if (EnableFastSpilling)
2006    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2007
2008  assert(li.weight != HUGE_VALF &&
2009         "attempt to spill already spilled interval!");
2010
2011  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2012  li.print(DOUT, tri_);
2013  DOUT << '\n';
2014
2015  // Each bit specifies whether a spill is required in the corresponding MBB.
2016  BitVector SpillMBBs(mf_->getNumBlockIDs());
2017  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2018  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2019  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2020  DenseMap<unsigned,unsigned> MBBVRegsMap;
2021  std::vector<LiveInterval*> NewLIs;
2022  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2023
2024  unsigned NumValNums = li.getNumValNums();
2025  SmallVector<MachineInstr*, 4> ReMatDefs;
2026  ReMatDefs.resize(NumValNums, NULL);
2027  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2028  ReMatOrigDefs.resize(NumValNums, NULL);
2029  SmallVector<int, 4> ReMatIds;
2030  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2031  BitVector ReMatDelete(NumValNums);
2032  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2033
2034  // Spilling a split live interval. It cannot be split any further. It's
2035  // also guaranteed to be a single val# / range interval.
2036  if (vrm.getPreSplitReg(li.reg)) {
2037    vrm.setIsSplitFromReg(li.reg, 0);
2038    // Unset the split kill marker on the last use.
2039    unsigned KillIdx = vrm.getKillPoint(li.reg);
2040    if (KillIdx) {
2041      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2042      assert(KillMI && "Last use disappeared?");
2043      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2044      assert(KillOp != -1 && "Last use disappeared?");
2045      KillMI->getOperand(KillOp).setIsKill(false);
2046    }
2047    vrm.removeKillPoint(li.reg);
2048    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2049    Slot = vrm.getStackSlot(li.reg);
2050    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2051    MachineInstr *ReMatDefMI = DefIsReMat ?
2052      vrm.getReMaterializedMI(li.reg) : NULL;
2053    int LdSlot = 0;
2054    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2055    bool isLoad = isLoadSS ||
2056      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2057    bool IsFirstRange = true;
2058    for (LiveInterval::Ranges::const_iterator
2059           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2060      // If this is a split live interval with multiple ranges, it means there
2061      // are two-address instructions that re-defined the value. Only the
2062      // first def can be rematerialized!
2063      if (IsFirstRange) {
2064        // Note ReMatOrigDefMI has already been deleted.
2065        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2066                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2067                             false, vrm, rc, ReMatIds, loopInfo,
2068                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2069                             MBBVRegsMap, NewLIs);
2070      } else {
2071        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2072                             Slot, 0, false, false, false,
2073                             false, vrm, rc, ReMatIds, loopInfo,
2074                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2075                             MBBVRegsMap, NewLIs);
2076      }
2077      IsFirstRange = false;
2078    }
2079
2080    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2081    return NewLIs;
2082  }
2083
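  // Only try splitting when it is enabled and the interval spans more than
  // one basic block, subject to the -split-limit cap below.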
2084  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2085  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2086    TrySplit = false;
2087  if (TrySplit)
2088    ++numSplits;
2089  bool NeedStackSlot = false;
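  // Examine each val#: if its def is rematerializable, remember a clone of
  // the defining instruction; otherwise the interval will need a stack slot.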
2090  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2091       i != e; ++i) {
2092    const VNInfo *VNI = *i;
2093    unsigned VN = VNI->id;
2094    if (VNI->isUnused())
2095      continue; // Dead val#.
2096    // Is the def for the val# rematerializable?
2097    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2098      ? getInstructionFromIndex(VNI->def) : 0;
2099    bool dummy;
2100    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2101      // Remember how to remat the def of this val#.
2102      ReMatOrigDefs[VN] = ReMatDefMI;
2103      // Original def may be modified so we have to make a copy here.
2104      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2105      ClonedMIs.push_back(Clone);
2106      ReMatDefs[VN] = Clone;
2107
2108      bool CanDelete = true;
2109      if (VNI->hasPHIKill()) {
2110        // A kill is a phi node, so not all of its uses can be rematerialized.
2111        // It must not be deleted.
2112        CanDelete = false;
2113        // Need a stack slot if there is any live range where uses cannot be
2114        // rematerialized.
2115        NeedStackSlot = true;
2116      }
2117      if (CanDelete)
2118        ReMatDelete.set(VN);
2119    } else {
2120      // Need a stack slot if there is any live range where uses cannot be
2121      // rematerialized.
2122      NeedStackSlot = true;
2123    }
2124  }
2125
2126  // One stack slot per live interval.
2127  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2128    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2129      Slot = vrm.assignVirt2StackSlot(li.reg);
2130
2131    // This case only occurs when the prealloc splitter has already assigned
2132    // a stack slot to this vreg.
2133    else
2134      Slot = vrm.getStackSlot(li.reg);
2135  }
2136
2137  // Create new intervals and rewrite defs and uses.
2138  for (LiveInterval::Ranges::const_iterator
2139         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2140    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2141    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2142    bool DefIsReMat = ReMatDefMI != NULL;
2143    bool CanDelete = ReMatDelete[I->valno->id];
2144    int LdSlot = 0;
2145    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2146    bool isLoad = isLoadSS ||
2147      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2148    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2149                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2150                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2151                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2152                               MBBVRegsMap, NewLIs);
2153  }
2154
2155  // Insert spills / restores if we are splitting.
2156  if (!TrySplit) {
2157    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2158    return NewLIs;
2159  }
2160
2161  SmallPtrSet<LiveInterval*, 4> AddedKill;
2162  SmallVector<unsigned, 2> Ops;
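  // First handle the blocks that need spill points: try folding each store
  // into the last def; if that fails, record a spill point for the spiller.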
2163  if (NeedStackSlot) {
2164    int Id = SpillMBBs.find_first();
2165    while (Id != -1) {
2166      std::vector<SRInfo> &spills = SpillIdxes[Id];
2167      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2168        int index = spills[i].index;
2169        unsigned VReg = spills[i].vreg;
2170        LiveInterval &nI = getOrCreateInterval(VReg);
2171        bool isReMat = vrm.isReMaterialized(VReg);
2172        MachineInstr *MI = getInstructionFromIndex(index);
2173        bool CanFold = false;
2174        bool FoundUse = false;
2175        Ops.clear();
2176        if (spills[i].canFold) {
2177          CanFold = true;
2178          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2179            MachineOperand &MO = MI->getOperand(j);
2180            if (!MO.isReg() || MO.getReg() != VReg)
2181              continue;
2182
2183            Ops.push_back(j);
2184            if (MO.isDef())
2185              continue;
2186            if (isReMat ||
2187                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2188                                                RestoreMBBs, RestoreIdxes))) {
2189              // MI has two-address uses of the same register. If the use
2190              // isn't the first and only use in the BB, then we can't fold
2191              // it. FIXME: Move this to rewriteInstructionsForSpills.
2192              CanFold = false;
2193              break;
2194            }
2195            FoundUse = true;
2196          }
2197        }
2198        // Fold the store into the def if possible.
2199        bool Folded = false;
2200        if (CanFold && !Ops.empty()) {
2201          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2202            Folded = true;
2203            if (FoundUse) {
2204              // We also folded the uses; do not issue a load.
2205              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2206              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2207            }
2208            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2209          }
2210        }
2211
2212        // Otherwise tell the spiller to issue a spill.
2213        if (!Folded) {
2214          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2215          bool isKill = LR->end == getStoreIndex(index);
2216          if (!MI->registerDefIsDead(nI.reg))
2217            // No need to spill a dead def.
2218            vrm.addSpillPoint(VReg, isKill, MI);
2219          if (isKill)
2220            AddedKill.insert(&nI);
2221        }
2222      }
2223      Id = SpillMBBs.find_next(Id);
2224    }
2225  }
2226
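  // Now handle the blocks that need restore points: try folding each reload
  // (or the rematerialized def) into the use; if that fails, record a
  // restore point so the spiller will issue a load or rematerialization.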
2227  int Id = RestoreMBBs.find_first();
2228  while (Id != -1) {
2229    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2230    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2231      int index = restores[i].index;
2232      if (index == -1)
2233        continue;
2234      unsigned VReg = restores[i].vreg;
2235      LiveInterval &nI = getOrCreateInterval(VReg);
2236      bool isReMat = vrm.isReMaterialized(VReg);
2237      MachineInstr *MI = getInstructionFromIndex(index);
2238      bool CanFold = false;
2239      Ops.clear();
2240      if (restores[i].canFold) {
2241        CanFold = true;
2242        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2243          MachineOperand &MO = MI->getOperand(j);
2244          if (!MO.isReg() || MO.getReg() != VReg)
2245            continue;
2246
2247          if (MO.isDef()) {
2248            // If this restore were to be folded, it would have been folded
2249            // already.
2250            CanFold = false;
2251            break;
2252          }
2253          Ops.push_back(j);
2254        }
2255      }
2256
2257      // Fold the load into the use if possible.
2258      bool Folded = false;
2259      if (CanFold && !Ops.empty()) {
2260        if (!isReMat)
2261          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2262        else {
2263          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2264          int LdSlot = 0;
2265          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2266          // If the rematerializable def is a load, also try to fold it.
2267          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2268            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2269                                          Ops, isLoadSS, LdSlot, VReg);
2270          if (!Folded) {
2271            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2272            if (ImpUse) {
2273              // Re-matting an instruction with virtual register use. Add the
2274              // register as an implicit use on the use MI and update the register
2275              // interval's spill weight to HUGE_VALF to prevent it from being
2276              // spilled.
2277              LiveInterval &ImpLi = getInterval(ImpUse);
2278              ImpLi.weight = HUGE_VALF;
2279              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2280            }
2281          }
2282        }
2283      }
2284      // If folding is not possible / failed, then tell the spiller to issue a
2285      // load / rematerialization for us.
2286      if (Folded)
2287        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2288      else
2289        vrm.addRestorePoint(VReg, MI);
2290    }
2291    Id = RestoreMBBs.find_next(Id);
2292  }
2293
2294  // Finalize intervals: add kills, finalize spill weights, and filter out
2295  // dead intervals.
2296  std::vector<LiveInterval*> RetNewLIs;
2297  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2298    LiveInterval *LI = NewLIs[i];
2299    if (!LI->empty()) {
2300      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2301      if (!AddedKill.count(LI)) {
2302        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2303        unsigned LastUseIdx = getBaseIndex(LR->end);
2304        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2305        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2306        assert(UseIdx != -1);
2307        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2308          LastUse->getOperand(UseIdx).setIsKill();
2309          vrm.addKillPoint(LI->reg, LastUseIdx);
2310        }
2311      }
2312      RetNewLIs.push_back(LI);
2313    }
2314  }
2315
2316  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2317  return RetNewLIs;
2318}
2319
2320/// hasAllocatableSuperReg - Return true if the specified physical register has
2321/// any super register that's allocatable.
2322bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2323  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2324    if (allocatableRegs_[*AS] && hasInterval(*AS))
2325      return true;
2326  return false;
2327}
2328
2329/// getRepresentativeReg - Find the largest super register of the specified
2330/// physical register.
2331unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2332  // Find the largest super-register that is allocatable.
2333  unsigned BestReg = Reg;
2334  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2335    unsigned SuperReg = *AS;
2336    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2337      BestReg = SuperReg;
2338      break;
2339    }
2340  }
2341  return BestReg;
2342}
2343
2344/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2345/// specified interval that conflicts with the specified physical register.
2346unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2347                                                   unsigned PhysReg) const {
2348  unsigned NumConflicts = 0;
2349  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2350  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2351         E = mri_->reg_end(); I != E; ++I) {
2352    MachineOperand &O = I.getOperand();
2353    MachineInstr *MI = O.getParent();
2354    unsigned Index = getInstructionIndex(MI);
2355    if (pli.liveAt(Index))
2356      ++NumConflicts;
2357  }
2358  return NumConflicts;
2359}
2360
2361/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2362/// around all defs and uses of the specified interval. Return true if it
2363/// was able to cut the physical register's live interval.
2364bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2365                                            unsigned PhysReg, VirtRegMap &vrm) {
2366  unsigned SpillReg = getRepresentativeReg(PhysReg);
2367
2368  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2369    // If there are registers which alias PhysReg but are not sub-registers
2370    // of the chosen representative super register, assert, since we can't
2371    // handle that case yet.
2372    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2373           tri_->isSuperRegister(*AS, SpillReg));
2374
2375  bool Cut = false;
2376  LiveInterval &pli = getInterval(SpillReg);
2377  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2378  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2379         E = mri_->reg_end(); I != E; ++I) {
2380    MachineOperand &O = I.getOperand();
2381    MachineInstr *MI = O.getParent();
2382    if (SeenMIs.count(MI))
2383      continue;
2384    SeenMIs.insert(MI);
2385    unsigned Index = getInstructionIndex(MI);
2386    if (pli.liveAt(Index)) {
2387      vrm.addEmergencySpill(SpillReg, MI);
2388      unsigned StartIdx = getLoadIndex(Index);
2389      unsigned EndIdx = getStoreIndex(Index)+1;
2390      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2391        pli.removeRange(StartIdx, EndIdx);
2392        Cut = true;
2393      } else {
2394        cerr << "Ran out of registers during register allocation!\n";
2395        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2396          cerr << "Please check your inline asm statement for invalid "
2397               << "constraints:\n";
2398          MI->print(cerr.stream(), tm_);
2399        }
2400        exit(1);
2401      }
2402      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2403        if (!hasInterval(*AS))
2404          continue;
2405        LiveInterval &spli = getInterval(*AS);
2406        if (spli.liveAt(Index))
2407          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2408      }
2409    }
2410  }
2411  return Cut;
2412}
2413
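/// addLiveRangeToEndOfBlock - Create a live range for reg extending from
/// startInst's def slot to the end of its basic block, and mark the value
/// as having a PHI kill at the block boundary.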
2414LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2415                                                   MachineInstr* startInst) {
2416  LiveInterval& Interval = getOrCreateInterval(reg);
2417  VNInfo* VN = Interval.getNextValue(
2418            getInstructionIndex(startInst) + InstrSlots::DEF,
2419            startInst, true, getVNInfoAllocator());
2420  VN->setHasPHIKill(true);
2421  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2422  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2423               getMBBEndIdx(startInst->getParent()) + 1, VN);
2424  Interval.addRange(LR);
2425
2426  return LR;
2427}
2428