LiveIntervalAnalysis.cpp revision 77942d4ee47e76626d574b027e4b9c5c90e2bbf4
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      Reg == SrcReg)
    return true;

  if ((MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
       MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) &&
      MI->getOperand(2).getReg() == Reg)
    return true;
  if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG &&
      MI->getOperand(1).getReg() == Reg)
    return true;
  return false;
}

/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
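///
/// For example (an illustrative sketch, not output from a real target):
///
///   %reg1024 = IMPLICIT_DEF
///   %reg1025 = ADD %reg1024, 1
///
/// becomes, after this pass marks the use as undefined:
///
///   %reg1025 = ADD %reg1024<undef>, 1
///
/// and a purely local IMPLICIT_DEF such as this one is then erased.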
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          break;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of the same register are also marked undef.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
202        // Delete all "local" implicit_def's. That include those which define
203        // physical registers since they cannot be liveout.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
             DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_def instructions we want to keep are those that are
      // live out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
             UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
        RMO.setReg(NewVReg);
        RMO.setIsUndef();
        RMO.setIsKill();
      }
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }
}

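// A note on the numbering scheme used below: each instruction is assigned a
// block of InstrSlots::NUM consecutive indices, subdivided (per the
// InstrSlots enum in LiveIntervalAnalysis.h) into LOAD, USE, DEF and STORE
// slots. A rough sketch of the layout this function produces:
//
//   index  0   empty slot at the start of the block
//   index  4   first instruction (use slot 5, def slot 6)
//   index  8   empty slot(s) reserved after the instruction
//   index 12   second instruction
//   ...
//
// plus an extra gap before the terminators of each block, which is where
// PHI kills are pointed.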
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {

      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        // PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      // PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
412            assert("Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}

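/// scaleNumbering - Rescale all indices kept by this analysis by the given
/// factor. For example (a hypothetical use), scaleNumbering(2) maps indices
/// 0, 4, 8, ... onto 0, 8, 16, ..., opening up unused index values between
/// existing instructions so that new ones can be numbered without a full
/// renumbering of the function.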
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }
}


/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getNameStr() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
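///
/// (For instance: if li is a virtual register interval and EAX is defined
/// by some instruction inside li's ranges, then conflictsWithPhysRegDef(li,
/// vrm, EAX) returns true, and li should not be assigned EAX or any register
/// aliasing it. EAX here is only an illustrative choice of register.)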
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}


void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
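      // For instance (an illustrative sketch):
      //   %reg1024 = MOV %reg1025     <- value #0 defined here
      //   %reg1024 = ADD %reg1024, 4  <- two-address redefinition
      // The interval for %reg1024 was built as if the MOV defined its only
      // value; it must now be split so the ADD defines a value of its own.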
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg);
    });

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
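  // (A sketch, assuming the LOAD/USE/DEF/STORE sub-slot layout from
  // LiveIntervalAnalysis.h: an instruction at base index 8 defines at
  // defSlot == 10, so a dead def yields the interval [10, 11).)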
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction (where we know it's dead) is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg);
    });

  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = getDefIndex(MIIdx) + 1;
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
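///
/// For example (a sketch): if %reg1024 is defined at index 6 and killed at
/// use slot 21, its live interval is the half-open range [6, 22), ending
/// just past the use slot of the killing instruction.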
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
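///
/// For example (a hypothetical remat candidate):
///   %reg1024 = LOAD %reg1025, 16
/// Rematerializing %reg1024 implicitly requires that %reg1025 still holds
/// the same value wherever the load is re-inserted, so this function would
/// return %reg1025 here.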
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
1193           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // Without assertions there is no point scanning for a second register
    // operand, so stop at the first one found.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For the use, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
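///
/// (A sketch of the MRInfo encoding: a def operand sets VirtRegMap::isMod, a
/// plain use sets isRef, and a use tied to a def marks the access isModRef
/// but is left out of FoldOps, since a two-address operand can only be
/// folded as both a reload and a store through the same stack slot.)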
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the given operands of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
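///
/// For example (an x86-flavored sketch, not tied to any particular target):
/// a reload of %reg1024 from stack slot #0 feeding
///   %reg1026 = ADD32rr %reg1024, %reg1025
/// may be folded into
///   %reg1026 = ADD32rm %reg1025, <fi#0>
/// eliminating the separate load instruction.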
1399bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1400                                         VirtRegMap &vrm, MachineInstr *DefMI,
1401                                         unsigned InstrIdx,
1402                                         SmallVector<unsigned, 2> &Ops,
1403                                         bool isSS, int Slot, unsigned Reg) {
1404  // If it is an implicit def instruction, just delete it.
1405  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1406    RemoveMachineInstrFromMaps(MI);
1407    vrm.RemoveMachineInstrFromMaps(MI);
1408    MI->eraseFromParent();
1409    ++numFolds;
1410    return true;
1411  }
1412
1413  // Filter the list of operand indexes that are to be folded. Abort if
1414  // any operand will prevent folding.
1415  unsigned MRInfo = 0;
1416  SmallVector<unsigned, 2> FoldOps;
1417  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1418    return false;
1419
1420  // The only time it's safe to fold into a two-address instruction is when
1421  // it's folding a reload and a spill from / into the same spill stack slot.
1422  if (DefMI && (MRInfo & VirtRegMap::isMod))
1423    return false;
1424
1425  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1426                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1427  if (fmi) {
1428    // Remember this instruction uses the spill slot.
1429    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1430
1431    // Attempt to fold the memory reference into the instruction. If
1432    // we can do this, we don't need to insert spill code.
1433    MachineBasicBlock &MBB = *MI->getParent();
1434    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1435      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1436    vrm.transferSpillPts(MI, fmi);
1437    vrm.transferRestorePts(MI, fmi);
1438    vrm.transferEmergencySpills(MI, fmi);
1439    mi2iMap_.erase(MI);
1440    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
1441    mi2iMap_[fmi] = InstrIdx;
1442    MI = MBB.insert(MBB.erase(MI), fmi);
1443    ++numFolds;
1444    return true;
1445  }
1446  return false;
1447}
1448
1449/// canFoldMemoryOperand - Returns true if the specified load / store
1450/// folding is possible.
1451bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1452                                         SmallVector<unsigned, 2> &Ops,
1453                                         bool ReMat) const {
1454  // Filter the list of operand indexes that are to be folded. Abort if
1455  // any operand will prevent folding.
1456  unsigned MRInfo = 0;
1457  SmallVector<unsigned, 2> FoldOps;
1458  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1459    return false;
1460
1461  // It's only legal to remat for a use, not a def.
1462  if (ReMat && (MRInfo & VirtRegMap::isMod))
1463    return false;
1464
1465  return tii_->canFoldMemoryOperand(MI, FoldOps);
1466}
1467
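/// intervalIsInOneMBB - Return true if all of the live ranges of the
/// specified interval lie within a single basic block.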
1468bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1469  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1470  for (LiveInterval::Ranges::const_iterator
1471         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1472    std::vector<IdxMBBPair>::const_iterator II =
1473      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1474    if (II == Idx2MBBMap.end())
1475      continue;
1476    if (I->end > II->first)  // crossing an MBB.
1477      return false;
1478    MBBs.insert(II->second);
1479    if (MBBs.size() > 1)
1480      return false;
1481  }
1482  return true;
1483}
1484
1485/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1486/// the interval by to-be re-materialized operands of MI) with NewVReg.
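/// For illustration (vregs hypothetical): if MI carries an implicit use of
/// li.reg because another of its operands is re-materialized by
///   %reg1026 = LEA32r %reg1024, ...   ; cached remat def, uses li.reg
/// then the cached instruction's use of li.reg must also be rewritten to
/// NewVReg, so that a later unfold refers to the right register.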
1487void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1488                                       MachineInstr *MI, unsigned NewVReg,
1489                                       VirtRegMap &vrm) {
1490  // There is an implicit use. That means one of the other operands is
1491  // being remat'ed and the remat'ed instruction has li.reg as a
1492  // use operand. Make sure we rewrite that as well.
1493  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1494    MachineOperand &MO = MI->getOperand(i);
1495    if (!MO.isReg())
1496      continue;
1497    unsigned Reg = MO.getReg();
1498    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1499      continue;
1500    if (!vrm.isReMaterialized(Reg))
1501      continue;
1502    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1503    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1504    if (UseMO)
1505      UseMO->setReg(NewVReg);
1506  }
1507}
1508
1509/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1510/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1511bool LiveIntervals::
1512rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1513                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1514                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1515                 unsigned Slot, int LdSlot,
1516                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1517                 VirtRegMap &vrm,
1518                 const TargetRegisterClass* rc,
1519                 SmallVector<int, 4> &ReMatIds,
1520                 const MachineLoopInfo *loopInfo,
1521                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1522                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1523                 std::vector<LiveInterval*> &NewLIs) {
1524  bool CanFold = false;
1525 RestartInstruction:
1526  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1527    MachineOperand& mop = MI->getOperand(i);
1528    if (!mop.isReg())
1529      continue;
1530    unsigned Reg = mop.getReg();
1531    unsigned RegI = Reg;
1532    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1533      continue;
1534    if (Reg != li.reg)
1535      continue;
1536
1537    bool TryFold = !DefIsReMat;
1538    bool FoldSS = true; // Default behavior unless it's a remat.
1539    int FoldSlot = Slot;
1540    if (DefIsReMat) {
1541      // If this is the rematerializable definition MI itself and
1542      // all of its uses are rematerialized, simply delete it.
1543      if (MI == ReMatOrigDefMI && CanDelete) {
1544        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1545                     << *MI << '\n');
1546        RemoveMachineInstrFromMaps(MI);
1547        vrm.RemoveMachineInstrFromMaps(MI);
1548        MI->eraseFromParent();
1549        break;
1550      }
1551
1552      // If def for this use can't be rematerialized, then try folding.
1553      // If def is rematerializable and it's a load, also try folding.
1554      TryFold = !ReMatDefMI || MI == ReMatOrigDefMI || isLoad;
1555      if (isLoad) {
1556        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1557        FoldSS = isLoadSS;
1558        FoldSlot = LdSlot;
1559      }
1560    }
1561
1562    // Scan all of the operands of this instruction rewriting operands
1563    // to use NewVReg instead of li.reg as appropriate.  We do this for
1564    // two reasons:
1565    //
1566    //   1. If the instr reads the same spilled vreg multiple times, we
1567    //      want to reuse the NewVReg.
1568    //   2. If the instr is a two-addr instruction, we are required to
1569    //      keep the src/dst regs pinned.
1570    //
1571    // Keep track of whether we replace a use and/or def so that we can
1572    // create the spill interval with the appropriate range.
1573
1574    HasUse = mop.isUse();
1575    HasDef = mop.isDef();
1576    SmallVector<unsigned, 2> Ops;
1577    Ops.push_back(i);
1578    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1579      const MachineOperand &MOj = MI->getOperand(j);
1580      if (!MOj.isReg())
1581        continue;
1582      unsigned RegJ = MOj.getReg();
1583      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1584        continue;
1585      if (RegJ == RegI) {
1586        Ops.push_back(j);
1587        if (!MOj.isUndef()) {
1588          HasUse |= MOj.isUse();
1589          HasDef |= MOj.isDef();
1590        }
1591      }
1592    }
1593
1594    // Create a new virtual register for the spill interval.
1595    // Create the new register now so we can map the fold instruction
1596    // to the new register so when it is unfolded we get the correct
1597    // answer.
1598    bool CreatedNewVReg = false;
1599    if (NewVReg == 0) {
1600      NewVReg = mri_->createVirtualRegister(rc);
1601      vrm.grow();
1602      CreatedNewVReg = true;
1603    }
1604
1605    if (!TryFold)
1606      CanFold = false;
1607    else {
1608      // Do not fold load / store here if we are splitting. We'll find an
1609      // optimal point to insert a load / store later.
1610      if (!TrySplit) {
1611        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1612                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1613          // Folding the load/store can completely change the instruction in
1614          // unpredictable ways; rescan it from the beginning.
1615
1616          if (FoldSS) {
1617            // We need to give the new vreg the same stack slot as the
1618            // spilled interval.
1619            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1620          }
1621
1622          HasUse = false;
1623          HasDef = false;
1624          CanFold = false;
1625          if (isNotInMIMap(MI))
1626            break;
1627          goto RestartInstruction;
1628        }
1629      } else {
1630        // We'll try to fold it later if it's profitable.
1631        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1632      }
1633    }
1634
1635    mop.setReg(NewVReg);
1636    if (mop.isImplicit())
1637      rewriteImplicitOps(li, MI, NewVReg, vrm);
1638
1639    // Reuse NewVReg for other reads.
1640    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1641      MachineOperand &mopj = MI->getOperand(Ops[j]);
1642      mopj.setReg(NewVReg);
1643      if (mopj.isImplicit())
1644        rewriteImplicitOps(li, MI, NewVReg, vrm);
1645    }
1646
1647    if (CreatedNewVReg) {
1648      if (DefIsReMat) {
1649        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1650        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1651          // Each valnum may have its own remat id.
1652          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1653        } else {
1654          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1655        }
1656        if (!CanDelete || (HasUse && HasDef)) {
1657          // If this is a two-addr instruction then its use operands are
1658          // rematerializable but its def is not. It should be assigned a
1659          // stack slot.
1660          vrm.assignVirt2StackSlot(NewVReg, Slot);
1661        }
1662      } else {
1663        vrm.assignVirt2StackSlot(NewVReg, Slot);
1664      }
1665    } else if (HasUse && HasDef &&
1666               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1667      // If this interval hasn't been assigned a stack slot (because earlier
1668      // def is a deleted remat def), do it now.
1669      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1670      vrm.assignVirt2StackSlot(NewVReg, Slot);
1671    }
1672
1673    // Re-matting an instruction with virtual register use. Add the
1674    // register as an implicit use on the use MI.
1675    if (DefIsReMat && ImpUse)
1676      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1677
1678    // Create a new register interval for this spill / remat.
1679    LiveInterval &nI = getOrCreateInterval(NewVReg);
1680    if (CreatedNewVReg) {
1681      NewLIs.push_back(&nI);
1682      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1683      if (TrySplit)
1684        vrm.setIsSplitFromReg(NewVReg, li.reg);
1685    }
1686
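    // Note on index arithmetic (the slot scheme used throughout this file):
    // each instruction owns InstrSlots::NUM consecutive slots, ordered
    // LOAD < USE < DEF < STORE. A use-only range below covers
    // [load-slot, use-slot + 1); a def covers [def-slot, store-slot).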
1687    if (HasUse) {
1688      if (CreatedNewVReg) {
1689        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1690                     nI.getNextValue(0, 0, false, VNInfoAllocator));
1691        DEBUG(errs() << " +" << LR);
1692        nI.addRange(LR);
1693      } else {
1694        // Extend the split live interval to this def / use.
1695        unsigned End = getUseIndex(index)+1;
1696        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1697                     nI.getValNumInfo(nI.getNumValNums()-1));
1698        DEBUG(errs() << " +" << LR);
1699        nI.addRange(LR);
1700      }
1701    }
1702    if (HasDef) {
1703      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1704                   nI.getNextValue(0, 0, false, VNInfoAllocator));
1705      DEBUG(errs() << " +" << LR);
1706      nI.addRange(LR);
1707    }
1708
1709    DEBUG({
1710        errs() << "\t\t\t\tAdded new interval: ";
1711        nI.print(errs(), tri_);
1712        errs() << '\n';
1713      });
1714  }
1715  return CanFold;
1716}
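
/// anyKillInMBBAfterIdx - Return true if the specified val# has a kill
/// inside MBB after the given instruction index (PHI kills are ignored).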
1717bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1718                                   const VNInfo *VNI,
1719                                   MachineBasicBlock *MBB, unsigned Idx) const {
1720  unsigned End = getMBBEndIdx(MBB);
1721  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1722    if (VNI->kills[j].isPHIKill)
1723      continue;
1724
1725    unsigned KillIdx = VNI->kills[j].killIdx;
1726    if (KillIdx > Idx && KillIdx < End)
1727      return true;
1728  }
1729  return false;
1730}
1731
1732/// RewriteInfo - Keep track of machine instrs that will be rewritten
1733/// during spilling.
1734namespace {
1735  struct RewriteInfo {
1736    unsigned Index;
1737    MachineInstr *MI;
1738    bool HasUse;
1739    bool HasDef;
1740    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1741      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1742  };
1743
1744  struct RewriteInfoCompare {
1745    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1746      return LHS.Index < RHS.Index;
1747    }
1748  };
1749}
1750
1751void LiveIntervals::
1752rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1753                    LiveInterval::Ranges::const_iterator &I,
1754                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1755                    unsigned Slot, int LdSlot,
1756                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1757                    VirtRegMap &vrm,
1758                    const TargetRegisterClass* rc,
1759                    SmallVector<int, 4> &ReMatIds,
1760                    const MachineLoopInfo *loopInfo,
1761                    BitVector &SpillMBBs,
1762                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1763                    BitVector &RestoreMBBs,
1764                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1765                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1766                    std::vector<LiveInterval*> &NewLIs) {
1767  bool AllCanFold = true;
1768  unsigned NewVReg = 0;
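  // Round the range out to instruction boundaries: start down to the base
  // slot of its instruction, end up to the first slot past the last
  // instruction in the range.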
1769  unsigned start = getBaseIndex(I->start);
1770  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1771
1772  // First collect all the def / use in this live range that will be rewritten.
1773  // Make sure they are sorted according to instruction index.
1774  std::vector<RewriteInfo> RewriteMIs;
1775  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1776         re = mri_->reg_end(); ri != re; ) {
1777    MachineInstr *MI = &*ri;
1778    MachineOperand &O = ri.getOperand();
1779    ++ri;
1780    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1781    unsigned index = getInstructionIndex(MI);
1782    if (index < start || index >= end)
1783      continue;
1784
1785    if (O.isUndef())
1786      // Must be defined by an implicit def. It should not be spilled. Note,
1787      // this is for correctness reason. e.g.
1788      // 8   %reg1024<def> = IMPLICIT_DEF
1789      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1790      // The live range [12, 14) is not part of the r1024 live interval since
1791      // it's defined by an implicit def. It will not conflict with the live
1792      // interval of r1025. Now suppose both registers are spilled; you can
1793      // easily see a situation where both registers are reloaded before
1794      // the INSERT_SUBREG and end up in target registers that overlap.
1795      continue;
1796    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1797  }
1798  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1799
1800  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1801  // Now rewrite the defs and uses.
1802  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1803    RewriteInfo &rwi = RewriteMIs[i];
1804    ++i;
1805    unsigned index = rwi.Index;
1806    bool MIHasUse = rwi.HasUse;
1807    bool MIHasDef = rwi.HasDef;
1808    MachineInstr *MI = rwi.MI;
1809    // If MI defs and/or uses the same register multiple times, then there
1810    // are multiple entries.
1811    unsigned NumUses = MIHasUse;
1812    while (i != e && RewriteMIs[i].MI == MI) {
1813      assert(RewriteMIs[i].Index == index);
1814      bool isUse = RewriteMIs[i].HasUse;
1815      if (isUse) ++NumUses;
1816      MIHasUse |= isUse;
1817      MIHasDef |= RewriteMIs[i].HasDef;
1818      ++i;
1819    }
1820    MachineBasicBlock *MBB = MI->getParent();
1821
1822    if (ImpUse && MI != ReMatDefMI) {
1823      // Re-matting an instruction with virtual register use. Update the
1824      // register interval's spill weight to HUGE_VALF to prevent it from
1825      // being spilled.
1826      LiveInterval &ImpLi = getInterval(ImpUse);
1827      ImpLi.weight = HUGE_VALF;
1828    }
1829
1830    unsigned MBBId = MBB->getNumber();
1831    unsigned ThisVReg = 0;
1832    if (TrySplit) {
1833      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1834      if (NVI != MBBVRegsMap.end()) {
1835        ThisVReg = NVI->second;
1836        // One common case:
1837        // x = use
1838        // ...
1839        // ...
1840        // def = ...
1841        //     = use
1842        // It's better to start a new interval to avoid artificially
1843        // extending the existing one.
1844        if (MIHasDef && !MIHasUse) {
1845          MBBVRegsMap.erase(MBB->getNumber());
1846          ThisVReg = 0;
1847        }
1848      }
1849    }
1850
1851    bool IsNew = ThisVReg == 0;
1852    if (IsNew) {
1853      // This ends the previous live interval. If all of its def / use
1854      // can be folded, give it a low spill weight.
1855      if (NewVReg && TrySplit && AllCanFold) {
1856        LiveInterval &nI = getOrCreateInterval(NewVReg);
1857        nI.weight /= 10.0F;
1858      }
1859      AllCanFold = true;
1860    }
1861    NewVReg = ThisVReg;
1862
1863    bool HasDef = false;
1864    bool HasUse = false;
1865    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1866                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1867                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1868                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1869                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1870    if (!HasDef && !HasUse)
1871      continue;
1872
1873    AllCanFold &= CanFold;
1874
1875    // Update weight of spill interval.
1876    LiveInterval &nI = getOrCreateInterval(NewVReg);
1877    if (!TrySplit) {
1878      // The spill weight is now infinity as it cannot be spilled again.
1879      nI.weight = HUGE_VALF;
1880      continue;
1881    }
1882
1883    // Keep track of the last def and first use in each MBB.
1884    if (HasDef) {
1885      if (MI != ReMatOrigDefMI || !CanDelete) {
1886        bool HasKill = false;
1887        if (!HasUse)
1888          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1889        else {
1890          // If this is two-address code, then this index starts a new VNInfo.
1891          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1892          if (VNI)
1893            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1894        }
1895        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1896          SpillIdxes.find(MBBId);
1897        if (!HasKill) {
1898          if (SII == SpillIdxes.end()) {
1899            std::vector<SRInfo> S;
1900            S.push_back(SRInfo(index, NewVReg, true));
1901            SpillIdxes.insert(std::make_pair(MBBId, S));
1902          } else if (SII->second.back().vreg != NewVReg) {
1903            SII->second.push_back(SRInfo(index, NewVReg, true));
1904          } else if ((int)index > SII->second.back().index) {
1905            // If there is an earlier def and this is a two-address
1906            // instruction, then it's not possible to fold the store (which
1907            // would also fold the load).
1908            SRInfo &Info = SII->second.back();
1909            Info.index = index;
1910            Info.canFold = !HasUse;
1911          }
1912          SpillMBBs.set(MBBId);
1913        } else if (SII != SpillIdxes.end() &&
1914                   SII->second.back().vreg == NewVReg &&
1915                   (int)index > SII->second.back().index) {
1916          // There is an earlier def that's not killed (must be two-address).
1917          // The spill is no longer needed.
1918          SII->second.pop_back();
1919          if (SII->second.empty()) {
1920            SpillIdxes.erase(MBBId);
1921            SpillMBBs.reset(MBBId);
1922          }
1923        }
1924      }
1925    }
1926
1927    if (HasUse) {
1928      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1929        SpillIdxes.find(MBBId);
1930      if (SII != SpillIdxes.end() &&
1931          SII->second.back().vreg == NewVReg &&
1932          (int)index > SII->second.back().index)
1933        // Use(s) follow the last def; it's not safe to fold the spill.
1934        SII->second.back().canFold = false;
1935      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1936        RestoreIdxes.find(MBBId);
1937      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1938        // If we are splitting live intervals, only fold if it's the first
1939        // use and there isn't another use later in the MBB.
1940        RII->second.back().canFold = false;
1941      else if (IsNew) {
1942        // Only need a reload if there isn't an earlier def / use.
1943        if (RII == RestoreIdxes.end()) {
1944          std::vector<SRInfo> Infos;
1945          Infos.push_back(SRInfo(index, NewVReg, true));
1946          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1947        } else {
1948          RII->second.push_back(SRInfo(index, NewVReg, true));
1949        }
1950        RestoreMBBs.set(MBBId);
1951      }
1952    }
1953
1954    // Update spill weight.
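    // (The heuristic in getSpillWeight scales the def/use count
    // exponentially with loop depth, roughly (def + use) * 10^depth, so
    // references inside deep loops dominate the weight.)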
1955    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1956    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1957  }
1958
1959  if (NewVReg && TrySplit && AllCanFold) {
1960    // If all of its def / use can be folded, give it a low spill weight.
1961    LiveInterval &nI = getOrCreateInterval(NewVReg);
1962    nI.weight /= 10.0F;
1963  }
1964}
1965
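/// alsoFoldARestore - Return true if the restore recorded at the given
/// instruction index for the given vreg in block Id is marked foldable.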
1966bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1967                        BitVector &RestoreMBBs,
1968                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1969  if (!RestoreMBBs[Id])
1970    return false;
1971  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1972  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1973    if (Restores[i].index == index &&
1974        Restores[i].vreg == vr &&
1975        Restores[i].canFold)
1976      return true;
1977  return false;
1978}
1979
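/// eraseRestoreInfo - Invalidate the restore recorded at the given
/// instruction index for the given vreg in block Id; an entry with index
/// -1 is skipped later, so no reload is issued for it.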
1980void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1981                        BitVector &RestoreMBBs,
1982                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1983  if (!RestoreMBBs[Id])
1984    return;
1985  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1986  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1987    if (Restores[i].index == index && Restores[i].vreg == vr)
1988      Restores[i].index = -1;
1989}
1990
1991/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1992/// spilled and create empty intervals for their uses.
1993void
1994LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1995                                    const TargetRegisterClass* rc,
1996                                    std::vector<LiveInterval*> &NewLIs) {
1997  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1998         re = mri_->reg_end(); ri != re; ) {
1999    MachineOperand &O = ri.getOperand();
2000    MachineInstr *MI = &*ri;
2001    ++ri;
2002    if (O.isDef()) {
2003      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2004             "Register def was not rewritten?");
2005      RemoveMachineInstrFromMaps(MI);
2006      vrm.RemoveMachineInstrFromMaps(MI);
2007      MI->eraseFromParent();
2008    } else {
2009      // This must be a use of an implicit_def so it's not part of the live
2010      // interval. Create a new empty live interval for it.
2011      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2012      unsigned NewVReg = mri_->createVirtualRegister(rc);
2013      vrm.grow();
2014      vrm.setIsImplicitlyDefined(NewVReg);
2015      NewLIs.push_back(&getOrCreateInterval(NewVReg));
2016      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2017        MachineOperand &MO = MI->getOperand(i);
2018        if (MO.isReg() && MO.getReg() == li.reg) {
2019          MO.setReg(NewVReg);
2020          MO.setIsUndef();
2021        }
2022      }
2023    }
2024  }
2025}
2026
2027std::vector<LiveInterval*> LiveIntervals::
2028addIntervalsForSpillsFast(const LiveInterval &li,
2029                          const MachineLoopInfo *loopInfo,
2030                          VirtRegMap &vrm) {
2031  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2032
2033  std::vector<LiveInterval*> added;
2034
2035  assert(li.weight != HUGE_VALF &&
2036         "attempt to spill already spilled interval!");
2037
2038  DEBUG({
2039      errs() << "\t\t\t\tadding intervals for spills for interval: ";
2040      li.dump();
2041      errs() << '\n';
2042    });
2043
2044  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2045
2046  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2047  while (RI != mri_->reg_end()) {
2048    MachineInstr* MI = &*RI;
2049
2050    SmallVector<unsigned, 2> Indices;
2051    bool HasUse = false;
2052    bool HasDef = false;
2053
2054    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2055      MachineOperand& mop = MI->getOperand(i);
2056      if (!mop.isReg() || mop.getReg() != li.reg) continue;
2057
2058      HasUse |= mop.isUse();
2059      HasDef |= mop.isDef();
2060
2061      Indices.push_back(i);
2062    }
2063
2064    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2065                              Indices, true, slot, li.reg)) {
2066      unsigned NewVReg = mri_->createVirtualRegister(rc);
2067      vrm.grow();
2068      vrm.assignVirt2StackSlot(NewVReg, slot);
2069
2070      // Create a new live interval for this spill.
2071      LiveInterval &nI = getOrCreateInterval(NewVReg);
2072
2073      // The spill weight is now infinity as it
2074      // cannot be spilled again.
2075      nI.weight = HUGE_VALF;
2076
2077      // Rewrite register operands to use the new vreg.
2078      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2079           E = Indices.end(); I != E; ++I) {
2080        MI->getOperand(*I).setReg(NewVReg);
2081
2082        if (MI->getOperand(*I).isUse())
2083          MI->getOperand(*I).setIsKill(true);
2084      }
2085
2086      // Fill in the new live interval.
2087      unsigned index = getInstructionIndex(MI);
2088      if (HasUse) {
2089        LiveRange LR(getLoadIndex(index), getUseIndex(index),
2090                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2091        DEBUG(errs() << " +" << LR);
2092        nI.addRange(LR);
2093        vrm.addRestorePoint(NewVReg, MI);
2094      }
2095      if (HasDef) {
2096        LiveRange LR(getDefIndex(index), getStoreIndex(index),
2097                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2098        DEBUG(errs() << " +" << LR);
2099        nI.addRange(LR);
2100        vrm.addSpillPoint(NewVReg, true, MI);
2101      }
2102
2103      added.push_back(&nI);
2104
2105      DEBUG({
2106          errs() << "\t\t\t\tadded new interval: ";
2107          nI.dump();
2108          errs() << '\n';
2109        });
2110    }
2111
2112
2113    RI = mri_->reg_begin(li.reg);
2114  }
2115
2116  return added;
2117}
2118
2119std::vector<LiveInterval*> LiveIntervals::
2120addIntervalsForSpills(const LiveInterval &li,
2121                      SmallVectorImpl<LiveInterval*> &SpillIs,
2122                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2123
2124  if (EnableFastSpilling)
2125    return addIntervalsForSpillsFast(li, loopInfo, vrm);
2126
2127  assert(li.weight != HUGE_VALF &&
2128         "attempt to spill already spilled interval!");
2129
2130  DEBUG({
2131      errs() << "\t\t\t\tadding intervals for spills for interval: ";
2132      li.print(errs(), tri_);
2133      errs() << '\n';
2134    });
2135
2136  // Each bit specifies whether a spill is required in the MBB.
2137  BitVector SpillMBBs(mf_->getNumBlockIDs());
2138  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2139  BitVector RestoreMBBs(mf_->getNumBlockIDs());
2140  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2141  DenseMap<unsigned,unsigned> MBBVRegsMap;
2142  std::vector<LiveInterval*> NewLIs;
2143  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2144
2145  unsigned NumValNums = li.getNumValNums();
2146  SmallVector<MachineInstr*, 4> ReMatDefs;
2147  ReMatDefs.resize(NumValNums, NULL);
2148  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2149  ReMatOrigDefs.resize(NumValNums, NULL);
2150  SmallVector<int, 4> ReMatIds;
2151  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2152  BitVector ReMatDelete(NumValNums);
2153  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2154
2155  // Spilling a split live interval. It cannot be split any further. Also,
2156  // it's guaranteed to be a single val# / range interval.
2157  if (vrm.getPreSplitReg(li.reg)) {
2158    vrm.setIsSplitFromReg(li.reg, 0);
2159    // Unset the split kill marker on the last use.
2160    unsigned KillIdx = vrm.getKillPoint(li.reg);
2161    if (KillIdx) {
2162      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2163      assert(KillMI && "Last use disappeared?");
2164      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2165      assert(KillOp != -1 && "Last use disappeared?");
2166      KillMI->getOperand(KillOp).setIsKill(false);
2167    }
2168    vrm.removeKillPoint(li.reg);
2169    bool DefIsReMat = vrm.isReMaterialized(li.reg);
2170    Slot = vrm.getStackSlot(li.reg);
2171    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2172    MachineInstr *ReMatDefMI = DefIsReMat ?
2173      vrm.getReMaterializedMI(li.reg) : NULL;
2174    int LdSlot = 0;
2175    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2176    bool isLoad = isLoadSS ||
2177      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2178    bool IsFirstRange = true;
2179    for (LiveInterval::Ranges::const_iterator
2180           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2181      // If this is a split live interval with multiple ranges, it means there
2182      // are two-address instructions that re-defined the value. Only the
2183      // first def can be rematerialized!
2184      if (IsFirstRange) {
2185        // Note ReMatOrigDefMI has already been deleted.
2186        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2187                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2188                             false, vrm, rc, ReMatIds, loopInfo,
2189                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2190                             MBBVRegsMap, NewLIs);
2191      } else {
2192        rewriteInstructionsForSpills(li, false, I, NULL, 0,
2193                             Slot, 0, false, false, false,
2194                             false, vrm, rc, ReMatIds, loopInfo,
2195                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2196                             MBBVRegsMap, NewLIs);
2197      }
2198      IsFirstRange = false;
2199    }
2200
2201    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2202    return NewLIs;
2203  }
2204
2205  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2206  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2207    TrySplit = false;
2208  if (TrySplit)
2209    ++numSplits;
2210  bool NeedStackSlot = false;
2211  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2212       i != e; ++i) {
2213    const VNInfo *VNI = *i;
2214    unsigned VN = VNI->id;
2215    if (VNI->isUnused())
2216      continue; // Dead val#.
2217    // Is the def for the val# rematerializable?
2218    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2219      ? getInstructionFromIndex(VNI->def) : 0;
2220    bool dummy;
2221    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2222      // Remember how to remat the def of this val#.
2223      ReMatOrigDefs[VN] = ReMatDefMI;
2224      // Original def may be modified so we have to make a copy here.
2225      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2226      ClonedMIs.push_back(Clone);
2227      ReMatDefs[VN] = Clone;
2228
2229      bool CanDelete = true;
2230      if (VNI->hasPHIKill()) {
2231        // A kill is a phi node; not all of its uses can be rematerialized.
2232        // It must not be deleted.
2233        CanDelete = false;
2234        // Need a stack slot if there is any live range where uses cannot be
2235        // rematerialized.
2236        NeedStackSlot = true;
2237      }
2238      if (CanDelete)
2239        ReMatDelete.set(VN);
2240    } else {
2241      // Need a stack slot if there is any live range where uses cannot be
2242      // rematerialized.
2243      NeedStackSlot = true;
2244    }
2245  }
2246
2247  // One stack slot per live interval.
2248  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2249    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2250      Slot = vrm.assignVirt2StackSlot(li.reg);
2251
2252    // This case only occurs when the prealloc splitter has already assigned
2253    // a stack slot to this vreg.
2254    else
2255      Slot = vrm.getStackSlot(li.reg);
2256  }
2257
2258  // Create new intervals and rewrite defs and uses.
2259  for (LiveInterval::Ranges::const_iterator
2260         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2261    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2262    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2263    bool DefIsReMat = ReMatDefMI != NULL;
2264    bool CanDelete = ReMatDelete[I->valno->id];
2265    int LdSlot = 0;
2266    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2267    bool isLoad = isLoadSS ||
2268      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2269    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2270                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2271                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2272                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2273                               MBBVRegsMap, NewLIs);
2274  }
2275
2276  // Insert spills / restores if we are splitting.
2277  if (!TrySplit) {
2278    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2279    return NewLIs;
2280  }
2281
2282  SmallPtrSet<LiveInterval*, 4> AddedKill;
2283  SmallVector<unsigned, 2> Ops;
2284  if (NeedStackSlot) {
2285    int Id = SpillMBBs.find_first();
2286    while (Id != -1) {
2287      std::vector<SRInfo> &spills = SpillIdxes[Id];
2288      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2289        int index = spills[i].index;
2290        unsigned VReg = spills[i].vreg;
2291        LiveInterval &nI = getOrCreateInterval(VReg);
2292        bool isReMat = vrm.isReMaterialized(VReg);
2293        MachineInstr *MI = getInstructionFromIndex(index);
2294        bool CanFold = false;
2295        bool FoundUse = false;
2296        Ops.clear();
2297        if (spills[i].canFold) {
2298          CanFold = true;
2299          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2300            MachineOperand &MO = MI->getOperand(j);
2301            if (!MO.isReg() || MO.getReg() != VReg)
2302              continue;
2303
2304            Ops.push_back(j);
2305            if (MO.isDef())
2306              continue;
2307            if (isReMat ||
2308                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2309                                                RestoreMBBs, RestoreIdxes))) {
2310              // MI has two-address uses of the same register. If the use
2311              // isn't the first and only use in the BB, then we can't fold
2312              // it. FIXME: Move this to rewriteInstructionsForSpills.
2313              CanFold = false;
2314              break;
2315            }
2316            FoundUse = true;
2317          }
2318        }
2319        // Fold the store into the def if possible.
2320        bool Folded = false;
2321        if (CanFold && !Ops.empty()) {
2322          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2323            Folded = true;
2324            if (FoundUse) {
2325              // Also folded uses; do not issue a load.
2326              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2327              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2328            }
2329            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2330          }
2331        }
2332
2333        // Otherwise tell the spiller to issue a spill.
2334        if (!Folded) {
2335          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2336          bool isKill = LR->end == getStoreIndex(index);
2337          if (!MI->registerDefIsDead(nI.reg))
2338            // No need to spill a dead def.
2339            vrm.addSpillPoint(VReg, isKill, MI);
2340          if (isKill)
2341            AddedKill.insert(&nI);
2342        }
2343      }
2344      Id = SpillMBBs.find_next(Id);
2345    }
2346  }
2347
2348  int Id = RestoreMBBs.find_first();
2349  while (Id != -1) {
2350    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2351    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2352      int index = restores[i].index;
2353      if (index == -1)
2354        continue;
2355      unsigned VReg = restores[i].vreg;
2356      LiveInterval &nI = getOrCreateInterval(VReg);
2357      bool isReMat = vrm.isReMaterialized(VReg);
2358      MachineInstr *MI = getInstructionFromIndex(index);
2359      bool CanFold = false;
2360      Ops.clear();
2361      if (restores[i].canFold) {
2362        CanFold = true;
2363        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2364          MachineOperand &MO = MI->getOperand(j);
2365          if (!MO.isReg() || MO.getReg() != VReg)
2366            continue;
2367
2368          if (MO.isDef()) {
2369            // If this restore were to be folded, it would have been folded
2370            // already.
2371            CanFold = false;
2372            break;
2373          }
2374          Ops.push_back(j);
2375        }
2376      }
2377
2378      // Fold the load into the use if possible.
2379      bool Folded = false;
2380      if (CanFold && !Ops.empty()) {
2381        if (!isReMat)
2382          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2383        else {
2384          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2385          int LdSlot = 0;
2386          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2387          // If the rematerializable def is a load, also try to fold it.
2388          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2389            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2390                                          Ops, isLoadSS, LdSlot, VReg);
2391          if (!Folded) {
2392            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2393            if (ImpUse) {
2394              // Re-matting an instruction with virtual register use. Add the
2395              // register as an implicit use on the use MI and update the register
2396              // interval's spill weight to HUGE_VALF to prevent it from being
2397              // spilled.
2398              LiveInterval &ImpLi = getInterval(ImpUse);
2399              ImpLi.weight = HUGE_VALF;
2400              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2401            }
2402          }
2403        }
2404      }
2405      // If folding is not possible / failed, then tell the spiller to issue a
2406      // load / rematerialization for us.
2407      if (Folded)
2408        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2409      else
2410        vrm.addRestorePoint(VReg, MI);
2411    }
2412    Id = RestoreMBBs.find_next(Id);
2413  }
2414
2415  // Finalize intervals: add kills, compute final spill weights, and
2416  // filter out dead intervals.
2417  std::vector<LiveInterval*> RetNewLIs;
2418  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2419    LiveInterval *LI = NewLIs[i];
2420    if (!LI->empty()) {
2421      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2422      if (!AddedKill.count(LI)) {
2423        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2424        unsigned LastUseIdx = getBaseIndex(LR->end);
2425        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2426        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2427        assert(UseIdx != -1);
2428        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2429          LastUse->getOperand(UseIdx).setIsKill();
2430          vrm.addKillPoint(LI->reg, LastUseIdx);
2431        }
2432      }
2433      RetNewLIs.push_back(LI);
2434    }
2435  }
2436
2437  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2438  return RetNewLIs;
2439}
2440
2441/// hasAllocatableSuperReg - Return true if the specified physical register has
2442/// any super register that's allocatable.
2443bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2444  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2445    if (allocatableRegs_[*AS] && hasInterval(*AS))
2446      return true;
2447  return false;
2448}
2449
2450/// getRepresentativeReg - Find the largest super register of the specified
2451/// physical register.
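/// For illustration only (32-bit x86, hypothetical allocation state): asked
/// for AX, the walk below may settle on EAX, since EAX has no allocatable
/// super-register there and may itself carry a live interval.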
2452unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2453  // Find the largest super-register that is allocatable.
2454  unsigned BestReg = Reg;
2455  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2456    unsigned SuperReg = *AS;
2457    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2458      BestReg = SuperReg;
2459      break;
2460    }
2461  }
2462  return BestReg;
2463}
2464
2465/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2466/// specified interval that conflict with the specified physical register.
2467unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2468                                                   unsigned PhysReg) const {
2469  unsigned NumConflicts = 0;
2470  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2471  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2472         E = mri_->reg_end(); I != E; ++I) {
2473    MachineOperand &O = I.getOperand();
2474    MachineInstr *MI = O.getParent();
2475    unsigned Index = getInstructionIndex(MI);
2476    if (pli.liveAt(Index))
2477      ++NumConflicts;
2478  }
2479  return NumConflicts;
2480}
2481
2482/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2483/// around all defs and uses of the specified interval. Return true if it
2484/// was able to cut its interval.
2485bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2486                                            unsigned PhysReg, VirtRegMap &vrm) {
2487  unsigned SpillReg = getRepresentativeReg(PhysReg);
2488
2489  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2490    // If there are registers which alias PhysReg but which are not a
2491    // sub-register of the chosen representative super register, assert,
2492    // since we can't handle it yet.
2493    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2494           tri_->isSuperRegister(*AS, SpillReg));
2495
2496  bool Cut = false;
2497  LiveInterval &pli = getInterval(SpillReg);
2498  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2499  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2500         E = mri_->reg_end(); I != E; ++I) {
2501    MachineOperand &O = I.getOperand();
2502    MachineInstr *MI = O.getParent();
2503    if (SeenMIs.count(MI))
2504      continue;
2505    SeenMIs.insert(MI);
2506    unsigned Index = getInstructionIndex(MI);
2507    if (pli.liveAt(Index)) {
2508      vrm.addEmergencySpill(SpillReg, MI);
2509      unsigned StartIdx = getLoadIndex(Index);
2510      unsigned EndIdx = getStoreIndex(Index)+1;
2511      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2512        pli.removeRange(StartIdx, EndIdx);
2513        Cut = true;
2514      } else {
2515        std::string msg;
2516        raw_string_ostream Msg(msg);
2517        Msg << "Ran out of registers during register allocation!";
2518        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2519          Msg << "\nPlease check your inline asm statement for invalid "
2520               << "constraints:\n";
2521          MI->print(Msg, tm_);
2522        }
2523        llvm_report_error(Msg.str());
2524      }
2525      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2526        if (!hasInterval(*AS))
2527          continue;
2528        LiveInterval &spli = getInterval(*AS);
2529        if (spli.liveAt(Index))
2530          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2531      }
2532    }
2533  }
2534  return Cut;
2535}
2536
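/// addLiveRangeToEndOfBlock - Give the specified register a new value
/// number defined at startInst and a live range extending from that
/// definition to the end of startInst's basic block, marking the value as
/// killed by a PHI.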
2537LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2538                                                  MachineInstr* startInst) {
2539  LiveInterval& Interval = getOrCreateInterval(reg);
2540  VNInfo* VN = Interval.getNextValue(
2541            getInstructionIndex(startInst) + InstrSlots::DEF,
2542            startInst, true, getVNInfoAllocator());
2543  VN->setHasPHIKill(true);
2544  VN->kills.push_back(
2545    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2546  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2547               getMBBEndIdx(startInst->getParent()) + 1, VN);
2548  Interval.addRange(LR);
2549
2550  return LR;
2551}
2552
2553