LiveIntervalAnalysis.cpp revision 7fbad27cfb7298c707e50af10609d463900d7211
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

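// Numbering scheme: each machine instruction is assigned a group of
// InstrSlots::NUM consecutive indices, one per slot kind (LOAD, USE, DEF,
// STORE; see the InstrSlots enum in LiveIntervalAnalysis.h). An extra empty
// group is reserved at the start of every block and after every instruction
// so that spill / restore code can later be numbered without renumbering
// the whole function. For example, with NUM == 4 a block is laid out as
//
//   [empty]  [MI0: 4,5,6,7]  [empty]  [MI1: 12,13,14,15]  [empty] ...
//
// which gives MI0 a def slot of 6 and MI1 a use slot of 13.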
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
      for (LiveInterval::iterator LI = OI->second.begin(),
           LE = OI->second.end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
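        // For example, if the instruction that used to own old base index
        // 12 (i.e. OldI2MI[3] with NUM == 4) now carries new base index 8,
        // an old start index of 14 (its DEF slot) is remapped to
        // mi2iMap_[OldI2MI[3]] + 2 == 10.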
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    ((I != OldI2MBB.end() && I->first > index) ||
                    (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = LI->end / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::STORE) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    ((I != OldI2MBB.end() && I->first > index) ||
                    (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

          LI->end = getMBBEndIdx(J->second) + 1;
        } else {
          LI->end = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the VNInfo def index, which works the same way as the
        // start indices above.
        VNInfo* vni = LI->valno;
        index = vni->def / InstrSlots::NUM;
        offset = vni->def % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    ((I != OldI2MBB.end() && I->first > index) ||
                    (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

          vni->def = getMBBStartIdx(J->second);

        } else {
          vni->def = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the VNInfo kill indices, which work the same way as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          index = vni->kills[i] / InstrSlots::NUM;
          offset = vni->kills[i] % InstrSlots::NUM;
          if (OldI2MI[vni->kills[i] / InstrSlots::NUM]) {
            std::vector<IdxMBBPair>::const_iterator I =
                    std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                      ((I != OldI2MBB.end() && I->first > index) ||
                      (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

            vni->kills[i] = getMBBEndIdx(J->second) + 1;
          } else {
            vni->kills[i] = mi2iMap_[OldI2MI[index]] + offset;
          }
        }
      }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
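      // For example, given (base indices on the left, NUM == 4):
      //   8    %reg1024 = MOV %reg1025        ; def slot 10, value #0
      //   16   %reg1024 = ADD %reg1024, ...   ; redef slot 18
      // the range [10, 18) created for the MOV is reassigned to a new
      // value #1, and value #0 becomes the value defined by the ADD at 18.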
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
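  // E.g. with the four-slot numbering, a def at base index 4 that is
  // killed by a use at base index 12 yields the interval [6, 14).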
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case in which we should have a dead physreg here, without a
  // kill or an instruction where we know it's dead, is if it is live-in
  // to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N during
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

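/// findLiveInMBBs - Collect into MBBs the basic blocks whose start index
/// falls within the given live range. Returns true if any were found.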
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

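/// createInterval - Create a new interval for reg. Physical registers get
/// an essentially infinite spill weight so the allocator will never try to
/// spill them.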
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
    break;
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember that this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
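      // A use tied to a def (e.g. the first source of a two-address add)
      // is deliberately left out of FoldOps: folding it would require
      // folding the def as well, so report isModRef and let the caller
      // decide whether the fold is still legal.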
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg, or any rematerialized load, into the given operands of
/// MI. If successful, MI is updated with the newly created MI and true is
/// returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // Crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and their target registers would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
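
/// anyKillInMBBAfterIdx - Return true if the val# of the specified interval
/// has a kill within MBB after the given index.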
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and their target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the old one.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
1401
1402    if (HasUse) {
1403      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1404        SpillIdxes.find(MBBId);
1405      if (SII != SpillIdxes.end() &&
1406          SII->second.back().vreg == NewVReg &&
1407          (int)index > SII->second.back().index)
1408        // There are use(s) following the last def; it's not safe to fold the spill.
1409        SII->second.back().canFold = false;
1410      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1411        RestoreIdxes.find(MBBId);
1412      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1413        // If we are splitting live intervals, only fold if it's the first
1414        // use and there isn't another use later in the MBB.
1415        RII->second.back().canFold = false;
1416      else if (IsNew) {
1417        // Only need a reload if there isn't an earlier def / use.
1418        if (RII == RestoreIdxes.end()) {
1419          std::vector<SRInfo> Infos;
1420          Infos.push_back(SRInfo(index, NewVReg, true));
1421          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1422        } else {
1423          RII->second.push_back(SRInfo(index, NewVReg, true));
1424        }
1425        RestoreMBBs.set(MBBId);
1426      }
1427    }
1428
1429    // Update spill weight.
1430    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1431    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1432  }
1433
1434  if (NewVReg && TrySplit && AllCanFold) {
1435    // If all of its def / use can be folded, give it a low spill weight.
1436    LiveInterval &nI = getOrCreateInterval(NewVReg);
1437    nI.weight /= 10.0F;
1438  }
1439}
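
// Note on the spill weight accumulated above: assuming getSpillWeight(Def,
// Use, Depth) is (Def + Use) * 10^Depth, as defined in the header around this
// revision, an instruction that both defines and uses NewVReg inside a doubly
// nested loop adds (1 + 1) * 10^2 == 200.0 to nI.weight, so intervals touched
// in hot loops become much more expensive to spill again.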
1440
1441bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1442                        BitVector &RestoreMBBs,
1443                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1444  if (!RestoreMBBs[Id])
1445    return false;
1446  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1447  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1448    if (Restores[i].index == index &&
1449        Restores[i].vreg == vr &&
1450        Restores[i].canFold)
1451      return true;
1452  return false;
1453}
1454
1455void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1456                        BitVector &RestoreMBBs,
1457                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1458  if (!RestoreMBBs[Id])
1459    return;
1460  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1461  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1462    if (Restores[i].index == index && Restores[i].vreg == vr)
1463      Restores[i].index = -1;
1464}
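
// How the two helpers above cooperate (a sketch, based on the code in
// addIntervalsForSpills below): alsoFoldARestore answers "is there a foldable
// restore of this vreg at this index?", which lets a store fold swallow the
// use as well; when that happens, eraseRestoreInfo marks the paired restore
// with index == -1, and the restore-insertion loop skips such entries, so no
// reload is issued.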
1465
1466/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1467/// spilled and create empty intervals for their uses.
1468void
1469LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1470                                    const TargetRegisterClass* rc,
1471                                    std::vector<LiveInterval*> &NewLIs) {
1472  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1473         re = mri_->reg_end(); ri != re; ) {
1474    MachineOperand &O = ri.getOperand();
1475    MachineInstr *MI = &*ri;
1476    ++ri;
1477    if (O.isDef()) {
1478      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1479             "Register def was not rewritten?");
1480      RemoveMachineInstrFromMaps(MI);
1481      vrm.RemoveMachineInstrFromMaps(MI);
1482      MI->eraseFromParent();
1483    } else {
1484      // This must be a use of an implicit_def, so it's not part of the live
1485      // interval. Create a new empty live interval for it.
1486      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1487      unsigned NewVReg = mri_->createVirtualRegister(rc);
1488      vrm.grow();
1489      vrm.setIsImplicitlyDefined(NewVReg);
1490      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1491      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1492        MachineOperand &MO = MI->getOperand(i);
1493        if (MO.isReg() && MO.getReg() == li.reg)
1494          MO.setReg(NewVReg);
1495      }
1496    }
1497  }
1498}
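
// Sketch of the rewrite performed above (vreg names hypothetical):
//   before:  %v = IMPLICIT_DEF        ; erased from the function and the maps
//            ... = use %v
//   after:   ... = use %w             ; %w is a fresh vreg with an empty live
//                                     ; interval, so the allocator may assign
//                                     ; it any register without a reload.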
1499
1500
1501std::vector<LiveInterval*> LiveIntervals::
1502addIntervalsForSpills(const LiveInterval &li,
1503                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1504                      float &SSWeight) {
1505  assert(li.weight != HUGE_VALF &&
1506         "attempt to spill already spilled interval!");
1507
1508  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1509  li.print(DOUT, tri_);
1510  DOUT << '\n';
1511
1512  // Spill slot weight.
1513  SSWeight = 0.0f;
1514
1515  // Each bit specifies whether a spill is required in the corresponding MBB.
1516  BitVector SpillMBBs(mf_->getNumBlockIDs());
1517  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1518  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1519  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1520  std::map<unsigned,unsigned> MBBVRegsMap;
1521  std::vector<LiveInterval*> NewLIs;
1522  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1523
1524  unsigned NumValNums = li.getNumValNums();
1525  SmallVector<MachineInstr*, 4> ReMatDefs;
1526  ReMatDefs.resize(NumValNums, NULL);
1527  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1528  ReMatOrigDefs.resize(NumValNums, NULL);
1529  SmallVector<int, 4> ReMatIds;
1530  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1531  BitVector ReMatDelete(NumValNums);
1532  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1533
1534  // Spilling a split live interval. It cannot be split any further, and it
1535  // is also guaranteed to be a single val# / range interval.
1536  if (vrm.getPreSplitReg(li.reg)) {
1537    vrm.setIsSplitFromReg(li.reg, 0);
1538    // Unset the split kill marker on the last use.
1539    unsigned KillIdx = vrm.getKillPoint(li.reg);
1540    if (KillIdx) {
1541      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1542      assert(KillMI && "Last use disappeared?");
1543      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1544      assert(KillOp != -1 && "Last use disappeared?");
1545      KillMI->getOperand(KillOp).setIsKill(false);
1546    }
1547    vrm.removeKillPoint(li.reg);
1548    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1549    Slot = vrm.getStackSlot(li.reg);
1550    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1551    MachineInstr *ReMatDefMI = DefIsReMat ?
1552      vrm.getReMaterializedMI(li.reg) : NULL;
1553    int LdSlot = 0;
1554    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1555    bool isLoad = isLoadSS ||
1556      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1557    bool IsFirstRange = true;
1558    for (LiveInterval::Ranges::const_iterator
1559           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1560      // If this is a split live interval with multiple ranges, it means there
1561      // are two-address instructions that re-defined the value. Only the
1562      // first def can be rematerialized!
1563      if (IsFirstRange) {
1564        // Note ReMatOrigDefMI has already been deleted.
1565        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1566                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1567                             false, vrm, rc, ReMatIds, loopInfo,
1568                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1569                             MBBVRegsMap, NewLIs, SSWeight);
1570      } else {
1571        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1572                             Slot, 0, false, false, false,
1573                             false, vrm, rc, ReMatIds, loopInfo,
1574                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1575                             MBBVRegsMap, NewLIs, SSWeight);
1576      }
1577      IsFirstRange = false;
1578    }
1579
1580    SSWeight = 0.0f;  // Already accounted for when split.
1581    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1582    return NewLIs;
1583  }
1584
1585  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1586  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1587    TrySplit = false;
1588  if (TrySplit)
1589    ++numSplits;
1590  bool NeedStackSlot = false;
1591  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1592       i != e; ++i) {
1593    const VNInfo *VNI = *i;
1594    unsigned VN = VNI->id;
1595    unsigned DefIdx = VNI->def;
1596    if (DefIdx == ~1U)
1597      continue; // Dead val#.
1598    // Is the def for the val# rematerializable?
1599    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1600      ? 0 : getInstructionFromIndex(DefIdx);
1601    bool dummy;
1602    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1603      // Remember how to remat the def of this val#.
1604      ReMatOrigDefs[VN] = ReMatDefMI;
1605      // The original def may be modified, so we have to make a copy here.
1606      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1607      ClonedMIs.push_back(Clone);
1608      ReMatDefs[VN] = Clone;
1609
1610      bool CanDelete = true;
1611      if (VNI->hasPHIKill) {
1612        // A kill is a phi node, so not all of its uses can be rematerialized.
1613        // It must not be deleted.
1614        CanDelete = false;
1615        // Need a stack slot if there is any live range where uses cannot be
1616        // rematerialized.
1617        NeedStackSlot = true;
1618      }
1619      if (CanDelete)
1620        ReMatDelete.set(VN);
1621    } else {
1622      // Need a stack slot if there is any live range where uses cannot be
1623      // rematerialized.
1624      NeedStackSlot = true;
1625    }
1626  }
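
// Rematerialization, illustrated (x86-style pseudo machine code, operands
// hypothetical): if the sole def of a val# is e.g.
//   %v = MOV32ri 42
// re-executing the move in front of each use is cheaper than storing 42 to a
// stack slot and reloading it. Such defs are cloned into ReMatDefs above;
// val#s whose def is not rematerializable, or whose kill is a PHI node, are
// the ones that force NeedStackSlot.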
1627
1628  // One stack slot per live interval.
1629  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1630    Slot = vrm.assignVirt2StackSlot(li.reg);
1631
1632  // Create new intervals and rewrite defs and uses.
1633  for (LiveInterval::Ranges::const_iterator
1634         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1635    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1636    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1637    bool DefIsReMat = ReMatDefMI != NULL;
1638    bool CanDelete = ReMatDelete[I->valno->id];
1639    int LdSlot = 0;
1640    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1641    bool isLoad = isLoadSS ||
1642      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1643    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1644                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1645                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1646                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1647                               MBBVRegsMap, NewLIs, SSWeight);
1648  }
1649
1650  // Insert spills / restores if we are splitting.
1651  if (!TrySplit) {
1652    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1653    return NewLIs;
1654  }
1655
1656  SmallPtrSet<LiveInterval*, 4> AddedKill;
1657  SmallVector<unsigned, 2> Ops;
1658  if (NeedStackSlot) {
1659    int Id = SpillMBBs.find_first();
1660    while (Id != -1) {
1661      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1662      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1663      std::vector<SRInfo> &spills = SpillIdxes[Id];
1664      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1665        int index = spills[i].index;
1666        unsigned VReg = spills[i].vreg;
1667        LiveInterval &nI = getOrCreateInterval(VReg);
1668        bool isReMat = vrm.isReMaterialized(VReg);
1669        MachineInstr *MI = getInstructionFromIndex(index);
1670        bool CanFold = false;
1671        bool FoundUse = false;
1672        Ops.clear();
1673        if (spills[i].canFold) {
1674          CanFold = true;
1675          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1676            MachineOperand &MO = MI->getOperand(j);
1677            if (!MO.isRegister() || MO.getReg() != VReg)
1678              continue;
1679
1680            Ops.push_back(j);
1681            if (MO.isDef())
1682              continue;
1683            if (isReMat ||
1684                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1685                                                RestoreMBBs, RestoreIdxes))) {
1686              // MI has two-address uses of the same register. If the use
1687              // isn't the first and only use in the BB, then we can't fold
1688              // it. FIXME: Move this to rewriteInstructionsForSpills.
1689              CanFold = false;
1690              break;
1691            }
1692            FoundUse = true;
1693          }
1694        }
1695        // Fold the store into the def if possible.
1696        bool Folded = false;
1697        if (CanFold && !Ops.empty()) {
1698          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1699            Folded = true;
1700            if (FoundUse) {
1701              // Also folded uses, do not issue a load.
1702              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1703              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1704            }
1705            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1706          }
1707        }
1708
1709        // Otherwise tell the spiller to issue a spill.
1710        if (!Folded) {
1711          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1712          bool isKill = LR->end == getStoreIndex(index);
1713          if (!MI->registerDefIsDead(nI.reg))
1714            // No need to spill a dead def.
1715            vrm.addSpillPoint(VReg, isKill, MI);
1716          if (isKill)
1717            AddedKill.insert(&nI);
1718        }
1719
1720        // Update spill slot weight.
1721        if (!isReMat)
1722          SSWeight += getSpillWeight(true, false, loopDepth);
1723      }
1724      Id = SpillMBBs.find_next(Id);
1725    }
1726  }
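
// Store folding, illustrated (x86-style, hypothetical operands): for a
// two-address def
//   %v = ADD32rr %v, %x
//   MOV32mr <fi#Slot>, %v        ; spill store
// tryFoldMemoryOperand may produce the read-modify-write memory form
//   ADD32mr <fi#Slot>, %x
// folding the store; since the tied use now reads the slot directly, the
// matching reload is cancelled via the eraseRestoreInfo call above.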
1727
1728  int Id = RestoreMBBs.find_first();
1729  while (Id != -1) {
1730    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1731    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1732
1733    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1734    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1735      int index = restores[i].index;
1736      if (index == -1)
1737        continue;
1738      unsigned VReg = restores[i].vreg;
1739      LiveInterval &nI = getOrCreateInterval(VReg);
1740      bool isReMat = vrm.isReMaterialized(VReg);
1741      MachineInstr *MI = getInstructionFromIndex(index);
1742      bool CanFold = false;
1743      Ops.clear();
1744      if (restores[i].canFold) {
1745        CanFold = true;
1746        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1747          MachineOperand &MO = MI->getOperand(j);
1748          if (!MO.isRegister() || MO.getReg() != VReg)
1749            continue;
1750
1751          if (MO.isDef()) {
1752            // If this restore were to be folded, it would have been folded
1753            // already.
1754            CanFold = false;
1755            break;
1756          }
1757          Ops.push_back(j);
1758        }
1759      }
1760
1761      // Fold the load into the use if possible.
1762      bool Folded = false;
1763      if (CanFold && !Ops.empty()) {
1764        if (!isReMat)
1765          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1766        else {
1767          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1768          int LdSlot = 0;
1769          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1770          // If the rematerializable def is a load, also try to fold it.
1771          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1772            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1773                                          Ops, isLoadSS, LdSlot, VReg);
1774          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1775          if (ImpUse) {
1776            // Re-matting an instruction with virtual register use. Add the
1777            // register as an implicit use on the use MI and update the register
1778            // interval's spill weight to HUGE_VALF to prevent it from being
1779            // spilled.
1780            LiveInterval &ImpLi = getInterval(ImpUse);
1781            ImpLi.weight = HUGE_VALF;
1782            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1783          }
1784        }
1785      }
1786      // If folding is not possible / failed, then tell the spiller to issue a
1787      // load / rematerialization for us.
1788      if (Folded)
1789        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1790      else
1791        vrm.addRestorePoint(VReg, MI);
1792
1793      // Update spill slot weight.
1794      if (!isReMat)
1795        SSWeight += getSpillWeight(false, true, loopDepth);
1796    }
1797    Id = RestoreMBBs.find_next(Id);
1798  }
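
// Restore folding, illustrated (x86-style, hypothetical operands): a reload
// feeding a single use
//   %v = MOV32rm <fi#Slot>       ; restore
//   %y = ADD32rr %y, %v
// may become the load-execute form
//   %y = ADD32rm %y, <fi#Slot>
// after which the reload's range [load, use+1) is trimmed from the new
// interval with nI.removeRange.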
1799
1800  // Finalize intervals: add kills, finalize spill weights, and filter out
1801  // dead intervals.
1802  std::vector<LiveInterval*> RetNewLIs;
1803  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1804    LiveInterval *LI = NewLIs[i];
1805    if (!LI->empty()) {
1806      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1807      if (!AddedKill.count(LI)) {
1808        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1809        unsigned LastUseIdx = getBaseIndex(LR->end);
1810        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1811        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1812        assert(UseIdx != -1);
1813        if (LastUse->getOperand(UseIdx).isImplicit() ||
1814            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1815          LastUse->getOperand(UseIdx).setIsKill();
1816          vrm.addKillPoint(LI->reg, LastUseIdx);
1817        }
1818      }
1819      RetNewLIs.push_back(LI);
1820    }
1821  }
1822
1823  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1824  return RetNewLIs;
1825}
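
// Hypothetical caller (not code from this revision), sketching how a linear
// scan allocator might consume the result:
//   float SSWeight;
//   std::vector<LiveInterval*> Added =
//     li_->addIntervalsForSpills(*cur, loopInfo, *vrm_, SSWeight);
//   for (unsigned i = 0, e = Added.size(); i != e; ++i)
//     unhandled_.push(Added[i]);  // shorter intervals re-enter allocation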
1826
1827/// hasAllocatableSuperReg - Return true if the specified physical register has
1828/// any super register that's allocatable.
1829bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1830  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1831    if (allocatableRegs_[*AS] && hasInterval(*AS))
1832      return true;
1833  return false;
1834}
1835
1836/// getRepresentativeReg - Find the largest super register of the specified
1837/// physical register.
1838unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1839  // Find the largest super-register that is allocatable.
1840  unsigned BestReg = Reg;
1841  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1842    unsigned SuperReg = *AS;
1843    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1844      BestReg = SuperReg;
1845      break;
1846    }
1847  }
1848  return BestReg;
1849}
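
// Example (32-bit x86, assuming the usual register file): for Reg == X86::AX
// the super-register chain contains EAX, which has no allocatable
// super-register of its own, so getRepresentativeReg(AX) would return EAX
// (provided an interval exists for EAX), and conflicts on AX are then
// accounted against EAX's interval.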
1850
1851/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1852/// specified interval that conflict with the specified physical register.
1853unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1854                                                   unsigned PhysReg) const {
1855  unsigned NumConflicts = 0;
1856  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1857  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1858         E = mri_->reg_end(); I != E; ++I) {
1859    MachineOperand &O = I.getOperand();
1860    MachineInstr *MI = O.getParent();
1861    unsigned Index = getInstructionIndex(MI);
1862    if (pli.liveAt(Index))
1863      ++NumConflicts;
1864  }
1865  return NumConflicts;
1866}
1867
1868/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1869/// around all defs and uses of the specified interval.
1870void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1871                                            unsigned PhysReg, VirtRegMap &vrm) {
1872  unsigned SpillReg = getRepresentativeReg(PhysReg);
1873
1874  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1875    // If there are registers which alias PhysReg but which are not
1876    // sub-registers of the chosen representative super register, assert,
1877    // since we can't handle them yet.
1878    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1879           tri_->isSuperRegister(*AS, SpillReg));
1880
1881  LiveInterval &pli = getInterval(SpillReg);
1882  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1883  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1884         E = mri_->reg_end(); I != E; ++I) {
1885    MachineOperand &O = I.getOperand();
1886    MachineInstr *MI = O.getParent();
1887    if (SeenMIs.count(MI))
1888      continue;
1889    SeenMIs.insert(MI);
1890    unsigned Index = getInstructionIndex(MI);
1891    if (pli.liveAt(Index)) {
1892      vrm.addEmergencySpill(SpillReg, MI);
1893      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1894      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1895        if (!hasInterval(*AS))
1896          continue;
1897        LiveInterval &spli = getInterval(*AS);
1898        if (spli.liveAt(Index))
1899          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1900      }
1901    }
1902  }
1903}
1904
1905LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1906                                                   MachineInstr* startInst) {
1907  LiveInterval& Interval = getOrCreateInterval(reg);
1908  VNInfo* VN = Interval.getNextValue(
1909            getInstructionIndex(startInst) + InstrSlots::DEF,
1910            startInst, getVNInfoAllocator());
1911  VN->hasPHIKill = true;
1912  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1913  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1914               getMBBEndIdx(startInst->getParent()) + 1, VN);
1915  Interval.addRange(LR);
1916
1917  return LR;
1918}
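
// Worked example of the index arithmetic above (assuming this pass's
// four-slot numbering, where InstrSlots::DEF == 2): with
// getInstructionIndex(startInst) == 100 and getMBBEndIdx(MBB) == 120, the new
// range is [102, 121): it starts at startInst's def slot and extends one past
// the block's end index so the value is live out of the block.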
1919