LiveIntervalAnalysis.cpp revision 51dcfbe96f66305314f5605c189ca7fc9aa1b301
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

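// A minimal usage sketch (not part of the original source): with an
// asserts-enabled build, the hidden flags above can be exercised from the
// llc command line while debugging this pass, e.g.
//
//   llc -debug-only=liveintervals -disable-rematerialization foo.bc
//
// where -debug-only keys off the DEBUG_TYPE defined at the top of this file.
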
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

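  // Illustration (an assumption based on the InstrSlots layout in
  // LiveIntervalAnalysis.h: LOAD=0, USE=1, DEF=2, STORE=3, NUM=4; not part
  // of the original source): a block with two instructions A and B is
  // numbered
  //
  //   [empty]   A         [empty]   B         [empty]
  //   0..3      4..7      8..11     12..15    16..19
  //
  // so A's use slot is 5 and its def slot is 6. The empty slots leave room
  // to insert spill / restore code later without renumbering the function.
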
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VNs with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which work the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined at any point during the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-addr elimination.  If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
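
// Illustration of the two-address case above (hypothetical vregs and
// indices, not from the original source):
//
//   4   %reg1024<def> = MOV %reg1025               ; value #0, def slot 6
//   8   %reg1024<def> = ADD %reg1024, %reg1026     ; redef, def slot 10
//
// After the redef is processed, the copy's range [6,10) is carried by a new
// value #1 (still recorded as defined by the copy), while value #0 is
// re-pointed at the two-address ADD, matching the OldValNo / ValNo swap in
// the code above.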

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend the old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // A def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it is killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // A live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
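
// Illustration (hypothetical, not from the original source): for a vreg
// defined at instruction index 4 and killed by a use at index 16, the code
// above builds a range from the def slot to one past the use slot, printed
// by the debug output roughly as
//
//   %reg1024 = [6,18:0)  0@6
//
// i.e. live over [6,18), carried by value number 0 defined at index 6.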

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}
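
// Illustration (hypothetical x86 example, not from the original source): a
// typical def accepted by the checks above is a side-effect-free constant
// materialization such as
//
//   %reg1024<def> = MOV32ri 42
//
// whereas a load from a stack slot that is not an immutable object fails the
// isImmutableObjectIndex test and is not treated as trivially remat'able.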

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Returns
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
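
// Illustration (hypothetical x86 example, not from the original source): the
// TIED_TO filtering above is what keeps two-address operands out of the fold
// list. In
//
//   %reg1024<def> = ADD32rr %reg1024, %reg1025
//
// the use of %reg1024 is tied to the def, so it is recorded as isModRef and
// skipped; only the untied operands are handed to the target folder.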


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg, or any rematerialized load, into the given operands of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // we are folding a reload or a spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
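
// Illustration of a successful fold (hypothetical x86 example, not from the
// original source): instead of reloading into a fresh register,
//
//   %reg1024 = MOV32rm <fi#2>                ; reload from the spill slot
//   %reg1025 = ADD32rr %reg1025, %reg1024
//
// the target folder rewrites the use to read memory directly,
//
//   %reg1025 = ADD32rm %reg1025, <fi#2>
//
// eliminating both the reload instruction and the reloaded register.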

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // Crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try to fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or a def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Update the stack slot spill weight unless we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
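
    // NOTE (an assumption recalled from LiveIntervalAnalysis.h, not stated
    // in this file): getSpillWeight computes roughly
    // (isDef + isUse) * 10^loopDepth, so defs and uses inside deep loops
    // make the eventual stack slot look far more expensive.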

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold a load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because an earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with a virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

1329void LiveIntervals::
1330rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1331                    LiveInterval::Ranges::const_iterator &I,
1332                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1333                    unsigned Slot, int LdSlot,
1334                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1335                    VirtRegMap &vrm,
1336                    const TargetRegisterClass* rc,
1337                    SmallVector<int, 4> &ReMatIds,
1338                    const MachineLoopInfo *loopInfo,
1339                    BitVector &SpillMBBs,
1340                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1341                    BitVector &RestoreMBBs,
1342                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1343                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1344                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1345  bool AllCanFold = true;
1346  unsigned NewVReg = 0;
1347  unsigned start = getBaseIndex(I->start);
1348  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1349
1350  // First collect all the def / use in this live range that will be rewritten.
1351  // Make sure they are sorted according to instruction index.
1352  std::vector<RewriteInfo> RewriteMIs;
1353  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1354         re = mri_->reg_end(); ri != re; ) {
1355    MachineInstr *MI = &*ri;
1356    MachineOperand &O = ri.getOperand();
1357    ++ri;
1358    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1359    unsigned index = getInstructionIndex(MI);
1360    if (index < start || index >= end)
1361      continue;
1362    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1363      // Must be defined by an implicit def. It should not be spilled. Note,
1364      // this is for correctness reason. e.g.
1365      // 8   %reg1024<def> = IMPLICIT_DEF
1366      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1367      // The live range [12, 14) are not part of the r1024 live interval since
1368      // it's defined by an implicit def. It will not conflicts with live
1369      // interval of r1025. Now suppose both registers are spilled, you can
1370      // easily see a situation where both registers are reloaded before
1371      // the INSERT_SUBREG and both target registers that would overlap.
1372      continue;
1373    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1374  }
1375  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with a virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval here to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
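    // (getSpillWeight grows rapidly with loop depth, roughly a factor of 10
    // per nesting level, so defs and uses inside deep loops make the new
    // interval much more expensive to spill again.)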
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
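
// On return, SpillIdxes (resp. RestoreIdxes) maps each MBB number whose bit
// is set in SpillMBBs (resp. RestoreMBBs) to that block's candidate spill
// (resp. restore) points. Each SRInfo carries the instruction index, the new
// vreg, and whether the store / load may still be folded into the instruction
// at that index; addIntervalsForSpills walks these side tables below to emit
// the actual spill and restore code.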

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
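
// Setting the index to -1 marks a restore as dead rather than erasing it;
// the restore-emission loop in addIntervalsForSpills simply skips entries
// whose index is -1.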

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
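
// The new empty interval gives the register allocator a vreg it can assign
// to any free register without inserting a reload; that is safe here because
// the value in question was never really defined.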

namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  SSWeight = 0.0f;

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';

      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse) {
        if (HasDef)
          SSWeight += getSpillWeight(true, true, loopDepth);
        else
          SSWeight += getSpillWeight(false, true, loopDepth);
      } else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

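    // Restart from the beginning of li.reg's use list: the operands just
    // visited were either folded away or rewritten to NewVReg, so the list
    // shrinks every iteration until no references to li.reg remain.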
    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
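
// Note the contrast with the main addIntervalsForSpills path below: this
// fast path never splits the interval and never rematerializes defs. It
// folds memory operands where it can and otherwise inserts a reload before
// every use and a store after every def of a short-lived new vreg, with all
// the new vregs sharing a single stack slot.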

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the corresponding MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
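  // ReMatDefs, ReMatOrigDefs and ReMatIds are parallel arrays indexed by
  // value number (VNInfo::id): for each val# they record the cloned def to
  // rematerialize from, the original def instruction, and the remat id
  // registered with the VirtRegMap (if any), respectively.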
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f;  // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized,
        // so the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores only if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses; do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with a virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
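
// For instance, on X86 the representative of AL would typically be EAX: EAX
// has a live interval of its own but (on a 32-bit target) no allocatable
// super-register, so the walk above stops there. (Illustrative example, not
// derived from this file.)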

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
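
// Removing these ranges from the spilled physical register's interval (and
// from its sub-registers) makes the register appear free around the
// conflicting instructions; the emergency spill recorded in the VirtRegMap
// is presumably what saves and restores its old value around them.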

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}