LiveIntervalAnalysis.cpp revision 320c630c1b55e17fa00249d499f974cb1a4238f8
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/LoopInfo.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/Passes.h"
27#include "llvm/CodeGen/SSARegMap.h"
28#include "llvm/Target/MRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
namespace {
  // Hidden options for help debugging.
  // -disable-rematerialization: turn off rematerialization of trivially
  // recomputable values (checked in isReMaterializable below).
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  // -split-intervals-at-bb: allow live intervals to be split at basic block
  // boundaries (on by default).
  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  // -split-limit: presumably caps the number of interval splits performed;
  // -1 means unlimited (consumer is outside this chunk — confirm).
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}
49
// Statistics reported under -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification and static registration under -liveintervals.
char LiveIntervals::ID = 0;
namespace {
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}
59
/// getAnalysisUsage - Declare pass dependencies: LiveVariables must run first
/// (and is preserved by this pass), and PHI elimination plus two-address
/// conversion must already have happened so the machine code is in the form
/// this analysis expects.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
68
69void LiveIntervals::releaseMemory() {
70  Idx2MBBMap.clear();
71  mi2iMap_.clear();
72  i2miMap_.clear();
73  r2iMap_.clear();
74  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
75  VNInfoAllocator.Reset();
76  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
77    delete ClonedMIs[i];
78}
79
namespace llvm {
  // Heterogeneous comparators so std::lower_bound can binary-search
  // Idx2MBBMap (a vector of (start index, MBB) pairs) with a bare index.
  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
    return V < IM.first;
  }

  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
    return IM.first < V;
  }

  // Strict weak ordering of IdxMBBPairs by start index, used to sort
  // Idx2MBBMap after it is built.
  struct Idx2MBBCompare {
    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
      return LHS.first < RHS.first;
    }
  };
}
95
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, target hooks and required analyses.
  mf_ = &fn;
  tm_ = &fn.getTarget();
  mri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = mri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  // Assign each instruction an index, leaving InstrSlots::NUM slots between
  // consecutive instructions so distinct points within one instruction
  // (cf. getBaseIndex/getDefIndex/getUseIndex) are addressable.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Keep Idx2MBBMap ordered by start index so it can be binary searched.
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  // Dump the computed intervals to the debug stream.
  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, mri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
143
144/// print - Implement the dump method.
145void LiveIntervals::print(std::ostream &O, const Module* ) const {
146  O << "********** INTERVALS **********\n";
147  for (const_iterator I = begin(), E = end(); I != E; ++I) {
148    I->second.print(DOUT, mri_);
149    DOUT << "\n";
150  }
151
152  O << "********** MACHINEINSTRS **********\n";
153  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
154       mbbi != mbbe; ++mbbi) {
155    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
156    for (MachineBasicBlock::iterator mii = mbbi->begin(),
157           mie = mbbi->end(); mii != mie; ++mii) {
158      O << getInstructionIndex(mii) << '\t' << *mii;
159    }
160  }
161}
162
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
/// NOTE(review): the operand scan below considers every register operand,
/// not only defs, despite the function name — confirm this is intended.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by each range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // A copy to or from the interval's own register is not a conflict.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual operands through the current physreg assignment, if
        // they have one; unassigned virtuals cannot conflict.
        if (MRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Conflict if the operand's register aliases the queried register.
        if (PhysReg && mri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
202
/// printRegName - Debug aid: print a physical register by its target name,
/// anything else (virtual registers, and reg 0 which is neither physical nor
/// virtual) as "%reg<N>", to the error stream.
void LiveIntervals::printRegName(unsigned reg) const {
  if (MRegisterInfo::isPhysicalRegister(reg))
    cerr << mri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
209
/// handleVirtualRegisterDef - Update 'interval' for a def of a virtual
/// register by instruction 'mi' (at index MIIdx) in block 'mbb'.  The first
/// def seen builds the full live range from LiveVariables kill information;
/// subsequent defs (which can only come from phi elimination or two-address
/// rewriting) split or extend the existing range.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // If the def is a copy (or EXTRACT_SUBREG), record the source register on
    // the value number; otherwise record 0 (no copy source).
    VNInfo *ValNo;
    unsigned SrcReg, DstReg;
    if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
      ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
    else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
      ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
                                    VNInfoAllocator);
    else
      ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Killed by its own defining instruction: unit-length range.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst));
      unsigned RedefIndex = getDefIndex(MIIdx);

      // The range ending just before the redef carries the value that feeds
      // the two-address use.
      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;
      unsigned OldEnd = OldLR->end;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(0, 0, VNInfoAllocator);
      interval.copyValNumInfo(ValNo, OldValNo);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->reg = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);
      interval.removeKills(ValNo, RedefIndex, OldEnd);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, mri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, mri_); DOUT << "\n";
        interval.removeRange(Start, End);
        interval.addKill(VNI, Start);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, mri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, mri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      // As above, remember the copy source (if any) on the new value number.
      VNInfo *ValNo;
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
        ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
      else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
        ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
                                      VNInfoAllocator);
      else
        ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
404
/// handlePhysicalRegisterDef - Update 'interval' for a def of a physical
/// register by instruction 'mi' (at index MIIdx) in MBB.  SrcReg is the
/// source register when the def is a copy-like instruction (0 otherwise)
/// and is recorded on any newly created value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              unsigned SrcReg) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // NOTE(review): 'start' is already a def slot, so getDefIndex(start)
  // appears redundant here (and below) — presumably idempotent; confirm.
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!SrcReg && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, SrcReg, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
465
466void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
467                                      MachineBasicBlock::iterator MI,
468                                      unsigned MIIdx,
469                                      unsigned reg) {
470  if (MRegisterInfo::isVirtualRegister(reg))
471    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
472  else if (allocatableRegs_[reg]) {
473    unsigned SrcReg, DstReg;
474    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
475      SrcReg = MI->getOperand(1).getReg();
476    else if (!tii_->isMoveInstr(*MI, SrcReg, DstReg))
477      SrcReg = 0;
478    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), SrcReg);
479    // Def of a register also defines its sub-registers.
480    for (const unsigned* AS = mri_->getSubRegisters(reg); *AS; ++AS)
481      // Avoid processing some defs more than once.
482      if (!MI->findRegisterDefOperand(*AS))
483        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
484  }
485}
486
/// handleLiveInRegister - Build the live range contributed by a register that
/// is live into MBB, starting at index MIIdx (the block's first index).
/// isAlias is true when this interval is for a sub-register pulled in only
/// because it aliases an actual live-in register.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  // (end == MIIdx means the scan above found neither a kill nor a redef.)
  if (end == MIIdx) {
    if (isAlias) {
      // Unused alias: treat as dead at the block entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Live through the whole block: extend to one past the last
      // instruction (baseIndex ended the scan there).
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
534
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Only create alias intervals for sub-registers not already tracked.
      for (const unsigned* AS = mri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // Iterate in reverse so implicit defs (appended last) are seen first;
      // NOTE(review): presumed rationale — confirm it matters for handlers.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      // Advance to the next instruction's index slot.
      MIIndex += InstrSlots::NUM;
    }
  }
}
578
579bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
580                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
581  std::vector<IdxMBBPair>::const_iterator I =
582    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
583
584  bool ResVal = false;
585  while (I != Idx2MBBMap.end()) {
586    if (LR.end <= I->first)
587      break;
588    MBBs.push_back(I->second);
589    ResVal = true;
590    ++I;
591  }
592  return ResVal;
593}
594
595
596LiveInterval LiveIntervals::createInterval(unsigned reg) {
597  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
598                       HUGE_VALF : 0.0F;
599  return LiveInterval(reg, Weight);
600}
601
602
603//===----------------------------------------------------------------------===//
604// Register allocator hooks.
605//
606
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// On success, isLoad is set when the rematerializable def is a load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Honor the -disable-rematerialization debugging flag.
  if (DisableReMat)
    return false;

  isLoad = false;
  // Side-effect-free instructions can always be recomputed.
  if (tii_->hasNoSideEffects(MI)) {
    isLoad = MI->getInstrDescriptor()->Flags & M_LOAD_FLAG;
    return true;
  }

  // Otherwise only loads from fixed (immutable-slot) stack objects qualify.
  int FrameIdx = 0;
  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
    return false;

  // This is a load from fixed stack slot. It can be rematerialized unless it's
  // re-defined by a two-address instruction.
  isLoad = true;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI == ValNo)
      continue;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // ~0u marks a val# with an unknown defining instruction.
    MachineInstr *DefMI = (DefIdx == ~0u)
      ? NULL : getInstructionFromIndex(DefIdx);
    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
      isLoad = false;
      return false;
    }
  }
  return true;
}
646
647/// isReMaterializable - Returns true if every definition of MI of every
648/// val# of the specified interval is re-materializable.
649bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
650  isLoad = false;
651  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
652       i != e; ++i) {
653    const VNInfo *VNI = *i;
654    unsigned DefIdx = VNI->def;
655    if (DefIdx == ~1U)
656      continue; // Dead val#.
657    // Is the def for the val# rematerializable?
658    if (DefIdx == ~0u)
659      return false;
660    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
661    bool DefIsLoad = false;
662    if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
663      return false;
664    isLoad |= DefIsLoad;
665  }
666  return true;
667}
668
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // Classify each candidate operand as mod and/or ref, building the list of
  // operand indices to actually fold.
  unsigned MRInfo = 0;
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    // FIXME: fold subreg use.
    if (MI->getOperand(OpIdx).getSubReg())
      return false;
    if (MI->getOperand(OpIdx).isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  // Fold either the stack slot (spill/restore) or the rematerialized def.
  MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
                           : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    // Rewire the instruction<->index maps so the new instruction takes over
    // the old instruction's index, then splice it into the block in place.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
722
723/// canFoldMemoryOperand - Returns true if the specified load / store
724/// folding is possible.
725bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
726                                         SmallVector<unsigned, 2> &Ops) const {
727  SmallVector<unsigned, 2> FoldOps;
728  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
729    unsigned OpIdx = Ops[i];
730    // FIXME: fold subreg use.
731    if (MI->getOperand(OpIdx).getSubReg())
732      return false;
733    FoldOps.push_back(OpIdx);
734  }
735
736  return mri_->canFoldMemoryOperand(MI, FoldOps);
737}
738
739bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
740  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
741  for (LiveInterval::Ranges::const_iterator
742         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
743    std::vector<IdxMBBPair>::const_iterator II =
744      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
745    if (II == Idx2MBBMap.end())
746      continue;
747    if (I->end > II->first)  // crossing a MBB.
748      return false;
749    MBBs.insert(II->second);
750    if (MBBs.size() > 1)
751      return false;
752  }
753  return true;
754}
755
756/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
757/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm, SSARegMap *RegMap,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
                 const LoopInfo *loopInfo,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  // Rewrites MI's operands that refer to li.reg to use NewVReg (created on
  // demand).  HasDef / HasUse report whether MI defines / reads li.reg.
  // Returns true only when splitting and the def / use could be folded into
  // a memory operand (CanFold); when not splitting, a successful fold
  // rewrites MI in place and the function returns false.
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    // Only rewrite the virtual register being spilled / split.
    if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    // Collect the indices of all other operands of MI that also refer to
    // li.reg so they can be rewritten together.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        // Splitting: only record whether the fold would be legal; the
        // actual fold is attempted later at a better insertion point.
        CanFold = canFoldMemoryOperand(MI, Ops);
      }
    } else CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = RegMap->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
      MI->getOperand(Ops[j]).setReg(NewVReg);

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      // Remember the replacement vreg for this MBB so later instructions in
      // the same block reuse it when splitting.
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // New interval: the range covers the reload, [load slot, use slot].
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def's range covers the spill, [def slot, store slot].
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, mri_);
    DOUT << '\n';
  }
  return CanFold;
}
933bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
934                                   const VNInfo *VNI,
935                                   MachineBasicBlock *MBB, unsigned Idx) const {
936  unsigned End = getMBBEndIdx(MBB);
937  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
938    unsigned KillIdx = VNI->kills[j];
939    if (KillIdx > Idx && KillIdx < End)
940      return true;
941  }
942  return false;
943}
944
945static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
946  const VNInfo *VNI = NULL;
947  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
948         e = li.vni_end(); i != e; ++i)
949    if ((*i)->def == DefIdx) {
950      VNI = *i;
951      break;
952    }
953  return VNI;
954}
955
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm, SSARegMap *RegMap,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const LoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  // Walks every instruction covered by the live range *I, rewrites its
  // defs / uses of li.reg via rewriteInstructionForSpills, and (when
  // splitting) records per-MBB spill points in SpillIdxes/SpillMBBs and
  // restore points in RestoreIdxes/RestoreMBBs for later processing.
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned index = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
  for (; index != end; index += InstrSlots::NUM) {
    // skip deleted instructions
    while (index != end && !getInstructionFromIndex(index))
      index += InstrSlots::NUM;
    if (index == end) break;

    MachineInstr *MI = getInstructionFromIndex(index);
    MachineBasicBlock *MBB = MI->getParent();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the replacement vreg already created for this
      // MBB, if there is one.
      std::map<unsigned,unsigned>::const_iterator NVI =
        MBBVRegsMap.find(MBB->getNumber());
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
        bool MIHasUse = false;
        bool MIHasDef = false;
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& mop = MI->getOperand(i);
          if (!mop.isRegister() || mop.getReg() != li.reg)
            continue;
          if (mop.isUse())
            MIHasUse = true;
          else
            MIHasDef = true;
        }
        if (MIHasDef && !MIHasUse) {
          // Def-only instruction: drop the block's current vreg and start a
          // fresh interval at this def.
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, RegMap, rc, ReMatIds, NewVReg,
                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    unsigned MBBId = MBB->getNumber();
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def within the MBB: record (or update) the
          // spill point for this block.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1132
1133bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1134                        BitVector &RestoreMBBs,
1135                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1136  if (!RestoreMBBs[Id])
1137    return false;
1138  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1139  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1140    if (Restores[i].index == index &&
1141        Restores[i].vreg == vr &&
1142        Restores[i].canFold)
1143      return true;
1144  return false;
1145}
1146
1147void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1148                        BitVector &RestoreMBBs,
1149                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1150  if (!RestoreMBBs[Id])
1151    return;
1152  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1153  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1154    if (Restores[i].index == index && Restores[i].vreg)
1155      Restores[i].index = -1;
1156}
1157
1158
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const LoopInfo *loopInfo, VirtRegMap &vrm) {
  // Spill (or, when profitable, split per-MBB) the interval li: rewrite all
  // of its defs / uses onto fresh virtual registers, record spill / restore
  // points with vrm, and return the non-empty intervals created.
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, mri_);
  DOUT << '\n';

  // Each bit specifies whether a spill / restore is required in the
  // corresponding MBB; the maps hold the per-MBB spill / restore points.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  SSARegMap *RegMap = mf_->getSSARegMap();
  const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);

  // Per-val# rematerialization state, indexed by val# id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).unsetIsKill();
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, RegMap, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, RegMap, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    return NewLIs;
  }

  // Decide whether to split li at basic-block boundaries; SplitLimit caps
  // the total number of splits (debugging aid).
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, RegMap, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit)
    return NewLIs;

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // For each block needing a spill, try to fold the store into the
    // defining instruction; otherwise record a spill point for the spiller.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse > 0) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Else tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // For each block needing a restore, try to fold the load (or remat load)
  // into a using instruction; otherwise record a restore point.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      // index of -1 marks an entry erased by eraseRestoreInfo.
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS ||
              (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the accumulated weight by the interval's size.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
        assert(UseIdx != -1);
        // Only mark the last use as a kill if it is not tied to a def
        // (two-address), since a tied def keeps the register live.
        if (LastUse->getInstrDescriptor()->
            getOperandConstraint(UseIdx, TOI::TIED_TO) == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  return RetNewLIs;
}
1453