LiveIntervalAnalysis.cpp revision 16ebb8ce739817c4def3872a2b5f800ce21cb5e6
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by the LLVM research group and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/LoopInfo.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/Passes.h"
27#include "llvm/CodeGen/SSARegMap.h"
28#include "llvm/Target/MRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
39namespace {
40  // Hidden options to help debugging.
41  cl::opt<bool> DisableReMat("disable-rematerialization",
42                              cl::init(false), cl::Hidden);
43
44  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45                          cl::init(false), cl::Hidden);
46  cl::opt<int> SplitLimit("split-limit",
47                          cl::init(-1), cl::Hidden);
48}
49
50STATISTIC(numIntervals, "Number of original intervals");
51STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
53STATISTIC(numSplits   , "Number of intervals split");
54
55char LiveIntervals::ID = 0;
56namespace {
57  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
58}
59
60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61  AU.addPreserved<LiveVariables>();
62  AU.addRequired<LiveVariables>();
63  AU.addPreservedID(PHIEliminationID);
64  AU.addRequiredID(PHIEliminationID);
65  AU.addRequiredID(TwoAddressInstructionPassID);
66  MachineFunctionPass::getAnalysisUsage(AU);
67}
68
69void LiveIntervals::releaseMemory() {
70  Idx2MBBMap.clear();
71  mi2iMap_.clear();
72  i2miMap_.clear();
73  r2iMap_.clear();
74  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
75  VNInfoAllocator.Reset();
76  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
77    delete ClonedMIs[i];
78}
79
80namespace llvm {
81  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
82    return V < IM.first;
83  }
84
85  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
86    return IM.first < V;
87  }
88
89  struct Idx2MBBCompare {
90    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
91      return LHS.first < RHS.first;
92    }
93  };
94}
95
96/// runOnMachineFunction - Compute live intervals for the whole function
97///
98bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
99  mf_ = &fn;
100  tm_ = &fn.getTarget();
101  mri_ = tm_->getRegisterInfo();
102  tii_ = tm_->getInstrInfo();
103  lv_ = &getAnalysis<LiveVariables>();
104  allocatableRegs_ = mri_->getAllocatableSet(fn);
105
106  // Number MachineInstrs and MachineBasicBlocks.
107  // Initialize MBB indexes to a sentinel.
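  // Explanatory note: each MachineInstr is assigned InstrSlots::NUM
  // consecutive index values so that getLoadIndex / getUseIndex / getDefIndex /
  // getStoreIndex can address a particular point within an instruction.  A
  // minimal sketch, assuming the usual four-slot layout:
  //
  //   indices 0..3  -> first instruction of the function
  //   indices 4..7  -> second instruction
  //   ...
  //
  // MBB2IdxMap then records the [first, last] index range covered by each
  // basic block.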
108  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
109
110  unsigned MIIndex = 0;
111  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
112       MBB != E; ++MBB) {
113    unsigned StartIdx = MIIndex;
114
115    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
116         I != E; ++I) {
117      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
118      assert(inserted && "multiple MachineInstr -> index mappings");
119      i2miMap_.push_back(I);
120      MIIndex += InstrSlots::NUM;
121    }
122
123    // Set the MBB2IdxMap entry for this MBB.
124    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
125    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
126  }
127  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
128
129  computeIntervals();
130
131  numIntervals += getNumIntervals();
132
133  DOUT << "********** INTERVALS **********\n";
134  for (iterator I = begin(), E = end(); I != E; ++I) {
135    I->second.print(DOUT, mri_);
136    DOUT << "\n";
137  }
138
139  numIntervalsAfter += getNumIntervals();
140  DEBUG(dump());
141  return true;
142}
143
144/// print - Implement the dump method.
145void LiveIntervals::print(std::ostream &O, const Module* ) const {
146  O << "********** INTERVALS **********\n";
147  for (const_iterator I = begin(), E = end(); I != E; ++I) {
148    I->second.print(O, mri_);
149    O << "\n";
150  }
151
152  O << "********** MACHINEINSTRS **********\n";
153  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
154       mbbi != mbbe; ++mbbi) {
155    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
156    for (MachineBasicBlock::iterator mii = mbbi->begin(),
157           mie = mbbi->end(); mii != mie; ++mii) {
158      O << getInstructionIndex(mii) << '\t' << *mii;
159    }
160  }
161}
162
163/// conflictsWithPhysRegDef - Returns true if the specified register
164/// is defined during the duration of the specified interval.
165bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
166                                            VirtRegMap &vrm, unsigned reg) {
167  for (LiveInterval::Ranges::const_iterator
168         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
169    for (unsigned index = getBaseIndex(I->start),
170           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
171         index += InstrSlots::NUM) {
172      // skip deleted instructions
173      while (index != end && !getInstructionFromIndex(index))
174        index += InstrSlots::NUM;
175      if (index == end) break;
176
177      MachineInstr *MI = getInstructionFromIndex(index);
178      unsigned SrcReg, DstReg;
179      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
180        if (SrcReg == li.reg || DstReg == li.reg)
181          continue;
182      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
183        MachineOperand& mop = MI->getOperand(i);
184        if (!mop.isRegister())
185          continue;
186        unsigned PhysReg = mop.getReg();
187        if (PhysReg == 0 || PhysReg == li.reg)
188          continue;
189        if (MRegisterInfo::isVirtualRegister(PhysReg)) {
190          if (!vrm.hasPhys(PhysReg))
191            continue;
192          PhysReg = vrm.getPhys(PhysReg);
193        }
194        if (PhysReg && mri_->regsOverlap(PhysReg, reg))
195          return true;
196      }
197    }
198  }
199
200  return false;
201}
202
203void LiveIntervals::printRegName(unsigned reg) const {
204  if (MRegisterInfo::isPhysicalRegister(reg))
205    cerr << mri_->getName(reg);
206  else
207    cerr << "%reg" << reg;
208}
209
210void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
211                                             MachineBasicBlock::iterator mi,
212                                             unsigned MIIdx,
213                                             LiveInterval &interval) {
214  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
215  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
216
217  // Virtual registers may be defined multiple times (due to phi
218  // elimination and 2-addr elimination).  Much of what we do only has to be
219  // done once for the vreg.  We use an empty interval to detect the first
220  // time we see a vreg.
221  if (interval.empty()) {
222    // Get the Idx of the defining instructions.
223    unsigned defIndex = getDefIndex(MIIdx);
224    VNInfo *ValNo;
225    unsigned SrcReg, DstReg;
226    if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
227      ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
228    else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
229      ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
230                                    VNInfoAllocator);
231    else
232      ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
233
234    assert(ValNo->id == 0 && "First value in interval is not 0?");
235
236    // Loop over all of the blocks that the vreg is defined in.  There are
237    // two cases we have to handle here.  The most common case is a vreg
238    // whose lifetime is contained within a basic block.  In this case there
239    // will be a single kill, in MBB, which comes after the definition.
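    // Illustrative note: in this case the interval becomes a single
    // intra-block range, [defSlot(def), useSlot(kill)+1) in the common case
    // (or [defSlot(def), defSlot(def)+1) when the defining instruction is
    // also the recorded kill).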
240    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
241      // FIXME: what about dead vars?
242      unsigned killIdx;
243      if (vi.Kills[0] != mi)
244        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
245      else
246        killIdx = defIndex+1;
247
248      // If the kill happens after the definition, we have an intra-block
249      // live range.
250      if (killIdx > defIndex) {
251        assert(vi.AliveBlocks.none() &&
252               "Shouldn't be alive across any blocks!");
253        LiveRange LR(defIndex, killIdx, ValNo);
254        interval.addRange(LR);
255        DOUT << " +" << LR << "\n";
256        interval.addKill(ValNo, killIdx);
257        return;
258      }
259    }
260
261    // The other case we handle is when a virtual register lives to the end
262    // of the defining block, potentially live across some blocks, then is
263    // live into some number of blocks, but gets killed.  Start by adding a
264    // range that goes from this definition to the end of the defining block.
265    LiveRange NewLR(defIndex,
266                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
267                    ValNo);
268    DOUT << " +" << NewLR;
269    interval.addRange(NewLR);
270
271    // Iterate over all of the blocks that the variable is completely
272    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
273    // live interval.
274    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
275      if (vi.AliveBlocks[i]) {
276        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
277        if (!MBB->empty()) {
278          LiveRange LR(getMBBStartIdx(i),
279                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
280                       ValNo);
281          interval.addRange(LR);
282          DOUT << " +" << LR;
283        }
284      }
285    }
286
287    // Finally, this virtual register is live from the start of any killing
288    // block to the 'use' slot of the killing instruction.
289    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
290      MachineInstr *Kill = vi.Kills[i];
291      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
292      LiveRange LR(getMBBStartIdx(Kill->getParent()),
293                   killIdx, ValNo);
294      interval.addRange(LR);
295      interval.addKill(ValNo, killIdx);
296      DOUT << " +" << LR;
297    }
298
299  } else {
300    // If this is the second time we see a virtual register definition, it
301    // must be due to phi elimination or two addr elimination.  If this is
302    // the result of two address elimination, then the vreg is one of the
303    // def-and-use register operands.
304    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
305      // If this is a two-address definition, then we have already processed
306      // the live range.  The only problem is that we didn't realize there
307      // are actually two values in the live interval.  Because of this we
308      // need to take the LiveRange that defines this register and split it
309      // into two values.
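      // Illustrative sketch (hypothetical vreg numbers): given
      //   %reg1024 = MOV %reg1025       ; input copy, initially value #0
      //   %reg1024 = ADD %reg1024, 1    ; two-address redefinition
      // the range from the MOV up to the ADD is given a new value number
      // below, and value #0 is re-pointed at the ADD.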
310      unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst));
311      unsigned RedefIndex = getDefIndex(MIIdx);
312
313      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
314      VNInfo *OldValNo = OldLR->valno;
315      unsigned OldEnd = OldLR->end;
316
317      // Delete the initial value, which should be short and continuous,
318      // because the 2-addr copy must be in the same MBB as the redef.
319      interval.removeRange(DefIndex, RedefIndex);
320
321      // Two-address vregs should always only be redefined once.  This means
322      // that at this point, there should be exactly one value number in it.
323      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
324
325      // The new value number (#1) is defined by the instruction we claimed
326      // defined value #0.
327      VNInfo *ValNo = interval.getNextValue(0, 0, VNInfoAllocator);
328      interval.copyValNumInfo(ValNo, OldValNo);
329
330      // Value#0 is now defined by the 2-addr instruction.
331      OldValNo->def = RedefIndex;
332      OldValNo->reg = 0;
333
334      // Add the new live interval which replaces the range for the input copy.
335      LiveRange LR(DefIndex, RedefIndex, ValNo);
336      DOUT << " replace range with " << LR;
337      interval.addRange(LR);
338      interval.addKill(ValNo, RedefIndex);
339      interval.removeKills(ValNo, RedefIndex, OldEnd);
340
341      // If this redefinition is dead, we need to add a dummy unit live
342      // range covering the def slot.
343      if (lv_->RegisterDefIsDead(mi, interval.reg))
344        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
345
346      DOUT << " RESULT: ";
347      interval.print(DOUT, mri_);
348
349    } else {
350      // Otherwise, this must be because of phi elimination.  If this is the
351      // first redefinition of the vreg that we have seen, go back and change
352      // the live range in the PHI block to be a different value number.
353      if (interval.containsOneValue()) {
354        assert(vi.Kills.size() == 1 &&
355               "PHI elimination vreg should have one kill, the PHI itself!");
356
357        // Remove the old range that we now know has an incorrect number.
358        VNInfo *VNI = interval.getValNumInfo(0);
359        MachineInstr *Killer = vi.Kills[0];
360        unsigned Start = getMBBStartIdx(Killer->getParent());
361        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
362        DOUT << " Removing [" << Start << "," << End << "] from: ";
363        interval.print(DOUT, mri_); DOUT << "\n";
364        interval.removeRange(Start, End);
365        interval.addKill(VNI, Start);
366        VNI->hasPHIKill = true;
367        DOUT << " RESULT: "; interval.print(DOUT, mri_);
368
369        // Replace the interval with one of a NEW value number.  Note that this
370        // value number isn't actually defined by an instruction, weird huh? :)
371        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
372        DOUT << " replace range with " << LR;
373        interval.addRange(LR);
374        interval.addKill(LR.valno, End);
375        DOUT << " RESULT: "; interval.print(DOUT, mri_);
376      }
377
378      // In the case of PHI elimination, each variable definition is only
379      // live until the end of the block.  We've already taken care of the
380      // rest of the live range.
381      unsigned defIndex = getDefIndex(MIIdx);
382
383      VNInfo *ValNo;
384      unsigned SrcReg, DstReg;
385      if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
386        ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
387      else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
388        ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
389                                      VNInfoAllocator);
390      else
391        ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
392
393      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
394      LiveRange LR(defIndex, killIndex, ValNo);
395      interval.addRange(LR);
396      interval.addKill(ValNo, killIndex);
397      ValNo->hasPHIKill = true;
398      DOUT << " +" << LR;
399    }
400  }
401
402  DOUT << '\n';
403}
404
405void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
406                                              MachineBasicBlock::iterator mi,
407                                              unsigned MIIdx,
408                                              LiveInterval &interval,
409                                              unsigned SrcReg) {
410  // A physical register cannot be live across basic blocks, so its
411  // lifetime must end somewhere in its defining basic block.
412  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
413
414  unsigned baseIndex = MIIdx;
415  unsigned start = getDefIndex(baseIndex);
416  unsigned end = start;
417
418  // If it is not used after definition, it is considered dead at
419  // the instruction defining it. Hence its interval is:
420  // [defSlot(def), defSlot(def)+1)
421  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
422    DOUT << " dead";
423    end = getDefIndex(start) + 1;
424    goto exit;
425  }
426
427  // If it is not dead on definition, it must be killed by a
428  // subsequent instruction. Hence its interval is:
429  // [defSlot(def), useSlot(kill)+1)
430  while (++mi != MBB->end()) {
431    baseIndex += InstrSlots::NUM;
432    if (lv_->KillsRegister(mi, interval.reg)) {
433      DOUT << " killed";
434      end = getUseIndex(baseIndex) + 1;
435      goto exit;
436    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
437      // Another instruction redefines the register before it is ever read.
438      // Then the register is essentially dead at the instruction that defines
439      // it. Hence its interval is:
440      // [defSlot(def), defSlot(def)+1)
441      DOUT << " dead";
442      end = getDefIndex(start) + 1;
443      goto exit;
444    }
445  }
446
447  // The only case where we should reach this point (no kill and no
448  // redefinition found in the defining block) is a physreg that is live-in
449  // to the function and never used.
450  assert(!SrcReg && "physreg was not killed in defining block!");
451  end = getDefIndex(start) + 1;  // It's dead.
452
453exit:
454  assert(start < end && "did not find end of interval?");
455
456  // Already exists? Extend old live interval.
457  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
458  VNInfo *ValNo = (OldLR != interval.end())
459    ? OldLR->valno : interval.getNextValue(start, SrcReg, VNInfoAllocator);
460  LiveRange LR(start, end, ValNo);
461  interval.addRange(LR);
462  interval.addKill(LR.valno, end);
463  DOUT << " +" << LR << '\n';
464}
465
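/// handleRegisterDef - Dispatch the definition of a register at MIIdx to the
/// virtual or physical register handler. For an allocatable physical
/// register, intervals are also created for its sub-registers unless the
/// instruction explicitly redefines them.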
466void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
467                                      MachineBasicBlock::iterator MI,
468                                      unsigned MIIdx,
469                                      unsigned reg) {
470  if (MRegisterInfo::isVirtualRegister(reg))
471    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
472  else if (allocatableRegs_[reg]) {
473    unsigned SrcReg, DstReg;
474    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
475      SrcReg = MI->getOperand(1).getReg();
476    else if (!tii_->isMoveInstr(*MI, SrcReg, DstReg))
477      SrcReg = 0;
478    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), SrcReg);
479    // Def of a register also defines its sub-registers.
480    for (const unsigned* AS = mri_->getSubRegisters(reg); *AS; ++AS)
481      // Avoid processing some defs more than once.
482      if (!MI->findRegisterDefOperand(*AS))
483        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
484  }
485}
486
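/// handleLiveInRegister - Create a live range for a register that is live in
/// to MBB, starting at the beginning of the block and ending at the first
/// kill or redefinition (or covering the whole block if it is live through).
/// isAlias is set when the live-in was implied by a sub-register of an
/// explicit live-in rather than being listed itself.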
487void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
488                                         unsigned MIIdx,
489                                         LiveInterval &interval, bool isAlias) {
490  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
491
492  // Look for kills; if the register reaches a def before it is killed, then
493  // it shouldn't be considered a live-in.
494  MachineBasicBlock::iterator mi = MBB->begin();
495  unsigned baseIndex = MIIdx;
496  unsigned start = baseIndex;
497  unsigned end = start;
498  while (mi != MBB->end()) {
499    if (lv_->KillsRegister(mi, interval.reg)) {
500      DOUT << " killed";
501      end = getUseIndex(baseIndex) + 1;
502      goto exit;
503    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
504      // Another instruction redefines the register before it is ever read.
505      // Then the register is essentially dead at the instruction that defines
506      // it. Hence its interval is:
507      // [defSlot(def), defSlot(def)+1)
508      DOUT << " dead";
509      end = getDefIndex(start) + 1;
510      goto exit;
511    }
512
513    baseIndex += InstrSlots::NUM;
514    ++mi;
515  }
516
517exit:
518  // Live-in register might not be used at all.
519  if (end == MIIdx) {
520    if (isAlias) {
521      DOUT << " dead";
522      end = getDefIndex(MIIdx) + 1;
523    } else {
524      DOUT << " live through";
525      end = baseIndex;
526    }
527  }
528
529  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
530  interval.addRange(LR);
531  interval.addKill(LR.valno, end);
532  DOUT << " +" << LR << '\n';
533}
534
535/// computeIntervals - Compute the live intervals for virtual registers.
536/// For some ordering of the machine instructions [1,N], a live interval is
537/// an interval [i, j), where 1 <= i <= j < N, during which a variable is
538/// live.
539void LiveIntervals::computeIntervals() {
540  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
541       << "********** Function: "
542       << ((Value*)mf_->getFunction())->getName() << '\n';
543  // Track the index of the current machine instr.
544  unsigned MIIndex = 0;
545  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
546       MBBI != E; ++MBBI) {
547    MachineBasicBlock *MBB = MBBI;
548    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
549
550    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
551
552    // Create intervals for live-ins to this BB first.
553    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
554           LE = MBB->livein_end(); LI != LE; ++LI) {
555      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
556      // Multiple live-ins can alias the same register.
557      for (const unsigned* AS = mri_->getSubRegisters(*LI); *AS; ++AS)
558        if (!hasInterval(*AS))
559          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
560                               true);
561    }
562
563    for (; MI != miEnd; ++MI) {
564      DOUT << MIIndex << "\t" << *MI;
565
566      // Handle defs.
567      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
568        MachineOperand &MO = MI->getOperand(i);
569        // handle register defs - build intervals
570        if (MO.isRegister() && MO.getReg() && MO.isDef())
571          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
572      }
573
574      MIIndex += InstrSlots::NUM;
575    }
576  }
577}
578
579bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
580                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
581  std::vector<IdxMBBPair>::const_iterator I =
582    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
583
584  bool ResVal = false;
585  while (I != Idx2MBBMap.end()) {
586    if (LR.end <= I->first)
587      break;
588    MBBs.push_back(I->second);
589    ResVal = true;
590    ++I;
591  }
592  return ResVal;
593}
594
595
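/// createInterval - Create a new, empty live interval for the given register.
/// Physical registers get an infinite spill weight so they are never chosen
/// for spilling.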
596LiveInterval LiveIntervals::createInterval(unsigned reg) {
597  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
598                       HUGE_VALF : 0.0F;
599  return LiveInterval(reg, Weight);
600}
601
602
603//===----------------------------------------------------------------------===//
604// Register allocator hooks.
605//
606
607/// isReMaterializable - Returns true if the definition MI of the specified
608/// val# of the specified interval is re-materializable.
609bool LiveIntervals::isReMaterializable(const LiveInterval &li,
610                                       const VNInfo *ValNo, MachineInstr *MI) {
611  if (DisableReMat)
612    return false;
613
614  if (tii_->isTriviallyReMaterializable(MI))
615    return true;
616
617  int FrameIdx = 0;
618  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
619      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
620    return false;
621
622  // This is a load from fixed stack slot. It can be rematerialized unless it's
623  // re-defined by a two-address instruction.
624  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
625       i != e; ++i) {
626    const VNInfo *VNI = *i;
627    if (VNI == ValNo)
628      continue;
629    unsigned DefIdx = VNI->def;
630    if (DefIdx == ~1U)
631      continue; // Dead val#.
632    MachineInstr *DefMI = (DefIdx == ~0u)
633      ? NULL : getInstructionFromIndex(DefIdx);
634    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg))
635      return false;
636  }
637  return true;
638}
639
640/// tryFoldMemoryOperand - Attempts to fold either a spill to / restore from
641/// a stack slot, or a rematerialized load, into the ith operand of the
642/// specified MI. If successful, MI is updated with the newly created MI and
643/// the function returns true.
644bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
645                                         VirtRegMap &vrm, MachineInstr *DefMI,
646                                         unsigned InstrIdx, unsigned OpIdx,
647                                         unsigned NumUses,
648                                         bool isSS, int Slot, unsigned Reg) {
649  // FIXME: fold subreg use
650  if (MI->getOperand(OpIdx).getSubReg())
651    return false;
652
653  // FIXME: It may be possible to fold load when there are multiple uses.
654  // e.g. On x86, TEST32rr r, r -> CMP32rm [mem], 0
655  if (NumUses > 1)
656    return false;
657
658  MachineInstr *fmi = isSS
659    ? mri_->foldMemoryOperand(MI, OpIdx, Slot)
660    : mri_->foldMemoryOperand(MI, OpIdx, DefMI);
661  if (fmi) {
662    // Attempt to fold the memory reference into the instruction. If
663    // we can do this, we don't need to insert spill code.
664    if (lv_)
665      lv_->instructionChanged(MI, fmi);
666    else
667      LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
668    MachineBasicBlock &MBB = *MI->getParent();
669    if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
670      vrm.virtFolded(Reg, MI, OpIdx, fmi);
671    vrm.transferSpillPts(MI, fmi);
672    vrm.transferRestorePts(MI, fmi);
673    mi2iMap_.erase(MI);
674    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
675    mi2iMap_[fmi] = InstrIdx;
676    MI = MBB.insert(MBB.erase(MI), fmi);
677    ++numFolds;
678    return true;
679  }
680  return false;
681}
682
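/// intervalIsInOneMBB - Returns true if all ranges of the specified interval
/// lie within a single basic block.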
683bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
684  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
685  for (LiveInterval::Ranges::const_iterator
686         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
687    std::vector<IdxMBBPair>::const_iterator II =
688      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
689    if (II == Idx2MBBMap.end())
690      continue;
691    if (I->end > II->first)  // crossing a MBB.
692      return false;
693    MBBs.insert(II->second);
694    if (MBBs.size() > 1)
695      return false;
696  }
697  return true;
698}
699
700/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
701/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
702void LiveIntervals::
703rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
704                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
705                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
706                 unsigned Slot, int LdSlot,
707                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
708                 VirtRegMap &vrm, SSARegMap *RegMap,
709                 const TargetRegisterClass* rc,
710                 SmallVector<int, 4> &ReMatIds,
711                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
712                 const LoopInfo *loopInfo,
713                 std::map<unsigned,unsigned> &MBBVRegsMap,
714                 std::vector<LiveInterval*> &NewLIs) {
715 RestartInstruction:
716  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
717    MachineOperand& mop = MI->getOperand(i);
718    if (!mop.isRegister())
719      continue;
720    unsigned Reg = mop.getReg();
721    unsigned RegI = Reg;
722    if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
723      continue;
724    if (Reg != li.reg)
725      continue;
726
727    bool TryFold = !DefIsReMat;
728    bool FoldSS = true; // Default behavior unless it's a remat.
729    int FoldSlot = Slot;
730    if (DefIsReMat) {
731      // If this is the rematerializable definition MI itself and
732      // all of its uses are rematerialized, simply delete it.
733      if (MI == ReMatOrigDefMI && CanDelete) {
734        DOUT << "\t\t\t\tErasing re-materializable def: ";
735        DOUT << MI << '\n';
736        RemoveMachineInstrFromMaps(MI);
737        vrm.RemoveMachineInstrFromMaps(MI);
738        MI->eraseFromParent();
739        break;
740      }
741
742      // If def for this use can't be rematerialized, then try folding.
743      // If def is rematerializable and it's a load, also try folding.
744      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
745      if (isLoad) {
746        // Try fold loads (from stack slot, constant pool, etc.) into uses.
747        FoldSS = isLoadSS;
748        FoldSlot = LdSlot;
749      }
750    }
751
752    // Do not fold load / store here if we are splitting. We'll find an
753    // optimal point to insert a load / store later.
754    if (TryFold)
755      TryFold = !TrySplit && NewVReg == 0;
756
757    // Scan all of the operands of this instruction rewriting operands
758    // to use NewVReg instead of li.reg as appropriate.  We do this for
759    // two reasons:
760    //
761    //   1. If the instr reads the same spilled vreg multiple times, we
762    //      want to reuse the NewVReg.
763    //   2. If the instr is a two-addr instruction, we are required to
764    //      keep the src/dst regs pinned.
765    //
766    // Keep track of whether we replace a use and/or def so that we can
767    // create the spill interval with the appropriate range.
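    // Illustrative sketch (hypothetical operands): for a two-address
    // instruction such as
    //   %reg1026<def> = ADD %reg1026, %reg1027
    // both the def and the tied use of %reg1026 must be rewritten to the same
    // NewVReg, which is why matching operands are collected in UpdateOps
    // below and rewritten together.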
768
769    HasUse = mop.isUse();
770    HasDef = mop.isDef();
771    unsigned NumUses = HasUse;
772    std::vector<unsigned> UpdateOps;
773    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
774      if (!MI->getOperand(j).isRegister())
775        continue;
776      unsigned RegJ = MI->getOperand(j).getReg();
777      if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
778        continue;
779      if (RegJ == RegI) {
780        UpdateOps.push_back(j);
781        if (MI->getOperand(j).isUse())
782          ++NumUses;
783        HasUse |= MI->getOperand(j).isUse();
784        HasDef |= MI->getOperand(j).isDef();
785      }
786    }
787
788    if (TryFold &&
789        tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i,
790                             NumUses, FoldSS, FoldSlot, Reg)) {
791      // Folding the load/store can completely change the instruction in
792      // unpredictable ways, rescan it from the beginning.
793      HasUse = false;
794      HasDef = false;
795      goto RestartInstruction;
796    }
797
798    // Create a new virtual register for the spill interval.
799    bool CreatedNewVReg = false;
800    if (NewVReg == 0) {
801      NewVReg = RegMap->createVirtualRegister(rc);
802      vrm.grow();
803      CreatedNewVReg = true;
804    }
805    mop.setReg(NewVReg);
806
807    // Reuse NewVReg for other reads.
808    for (unsigned j = 0, e = UpdateOps.size(); j != e; ++j)
809      MI->getOperand(UpdateOps[j]).setReg(NewVReg);
810
811    if (CreatedNewVReg) {
812      if (DefIsReMat) {
813        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
814        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
815          // Each valnum may have its own remat id.
816          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
817        } else {
818          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
819        }
820        if (!CanDelete || (HasUse && HasDef)) {
821          // If this is a two-addr instruction then its use operands are
822          // rematerializable but its def is not. It should be assigned a
823          // stack slot.
824          vrm.assignVirt2StackSlot(NewVReg, Slot);
825        }
826      } else {
827        vrm.assignVirt2StackSlot(NewVReg, Slot);
828      }
829    } else if (HasUse && HasDef &&
830               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
831      // If this interval hasn't been assigned a stack slot (because earlier
832      // def is a deleted remat def), do it now.
833      assert(Slot != VirtRegMap::NO_STACK_SLOT);
834      vrm.assignVirt2StackSlot(NewVReg, Slot);
835    }
836
837    // create a new register interval for this spill / remat.
838    LiveInterval &nI = getOrCreateInterval(NewVReg);
839    if (CreatedNewVReg) {
840      NewLIs.push_back(&nI);
841      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
842      if (TrySplit)
843        vrm.setIsSplitFromReg(NewVReg, li.reg);
844    }
845
846    if (HasUse) {
847      if (CreatedNewVReg) {
848        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
849                     nI.getNextValue(~0U, 0, VNInfoAllocator));
850        DOUT << " +" << LR;
851        nI.addRange(LR);
852      } else {
853        // Extend the split live interval to this def / use.
854        unsigned End = getUseIndex(index)+1;
855        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
856                     nI.getValNumInfo(nI.getNumValNums()-1));
857        DOUT << " +" << LR;
858        nI.addRange(LR);
859      }
860    }
861    if (HasDef) {
862      LiveRange LR(getDefIndex(index), getStoreIndex(index),
863                   nI.getNextValue(~0U, 0, VNInfoAllocator));
864      DOUT << " +" << LR;
865      nI.addRange(LR);
866    }
867
868    DOUT << "\t\t\t\tAdded new interval: ";
869    nI.print(DOUT, mri_);
870    DOUT << '\n';
871  }
872}
873
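/// anyKillInMBBAfterIdx - Return true if the given val# of the specified
/// interval has a kill inside MBB at an index later than Idx.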
874bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
875                                   const VNInfo *VNI,
876                                   MachineBasicBlock *MBB, unsigned Idx) const {
877  unsigned End = getMBBEndIdx(MBB);
878  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
879    unsigned KillIdx = VNI->kills[j];
880    if (KillIdx > Idx && KillIdx < End)
881      return true;
882  }
883  return false;
884}
885
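/// findDefinedVNInfo - Return the val# of the specified interval that is
/// defined at the given index, or NULL if none is.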
886static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
887  const VNInfo *VNI = NULL;
888  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
889         e = li.vni_end(); i != e; ++i)
890    if ((*i)->def == DefIdx) {
891      VNI = *i;
892      break;
893    }
894  return VNI;
895}
896
897void LiveIntervals::
898rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
899                    LiveInterval::Ranges::const_iterator &I,
900                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
901                    unsigned Slot, int LdSlot,
902                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
903                    VirtRegMap &vrm, SSARegMap *RegMap,
904                    const TargetRegisterClass* rc,
905                    SmallVector<int, 4> &ReMatIds,
906                    const LoopInfo *loopInfo,
907                    BitVector &SpillMBBs,
908                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
909                    BitVector &RestoreMBBs,
910                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
911                    std::map<unsigned,unsigned> &MBBVRegsMap,
912                    std::vector<LiveInterval*> &NewLIs) {
913  unsigned NewVReg = 0;
914  unsigned index = getBaseIndex(I->start);
915  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
916  bool TrySplitMI = TrySplit && vrm.getPreSplitReg(li.reg) == 0;
917  for (; index != end; index += InstrSlots::NUM) {
918    // skip deleted instructions
919    while (index != end && !getInstructionFromIndex(index))
920      index += InstrSlots::NUM;
921    if (index == end) break;
922
923    MachineInstr *MI = getInstructionFromIndex(index);
924    MachineBasicBlock *MBB = MI->getParent();
925    NewVReg = 0;
926    if (TrySplitMI) {
927      std::map<unsigned,unsigned>::const_iterator NVI =
928        MBBVRegsMap.find(MBB->getNumber());
929      if (NVI != MBBVRegsMap.end()) {
930        NewVReg = NVI->second;
931        // One common case:
932        // x = use
933        // ...
934        // ...
935        // def = ...
936        //     = use
937        // It's better to start a new interval to avoid artificially
938        // extending the new interval.
939        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
940        bool MIHasUse = false;
941        bool MIHasDef = false;
942        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
943          MachineOperand& mop = MI->getOperand(i);
944          if (!mop.isRegister() || mop.getReg() != li.reg)
945            continue;
946          if (mop.isUse())
947            MIHasUse = true;
948          else
949            MIHasDef = true;
950        }
951        if (MIHasDef && !MIHasUse) {
952          MBBVRegsMap.erase(MBB->getNumber());
953          NewVReg = 0;
954        }
955      }
956    }
957    bool IsNew = NewVReg == 0;
958    bool HasDef = false;
959    bool HasUse = false;
960    rewriteInstructionForSpills(li, TrySplitMI, I->valno->id, index, end,
961                                MI, ReMatOrigDefMI, ReMatDefMI, Slot, LdSlot,
962                                isLoad, isLoadSS, DefIsReMat, CanDelete, vrm,
963                                RegMap, rc, ReMatIds, NewVReg, HasDef, HasUse,
964                                loopInfo, MBBVRegsMap, NewLIs);
965    if (!HasDef && !HasUse)
966      continue;
967
968    // Update weight of spill interval.
969    LiveInterval &nI = getOrCreateInterval(NewVReg);
970    if (!TrySplitMI) {
971      // The spill weight is now infinity as it cannot be spilled again.
972      nI.weight = HUGE_VALF;
973      continue;
974    }
975
976    // Keep track of the last def and first use in each MBB.
977    unsigned MBBId = MBB->getNumber();
978    if (HasDef) {
979      if (MI != ReMatOrigDefMI || !CanDelete) {
980        bool HasKill = false;
981        if (!HasUse)
982          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
983        else {
984          // If this is two-address code, then this index starts a new VNInfo.
985          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
986          if (VNI)
987            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
988        }
989        if (!HasKill) {
990          std::map<unsigned, std::vector<SRInfo> >::iterator SII =
991            SpillIdxes.find(MBBId);
992          if (SII == SpillIdxes.end()) {
993            std::vector<SRInfo> S;
994            S.push_back(SRInfo(index, NewVReg, true));
995            SpillIdxes.insert(std::make_pair(MBBId, S));
996          } else if (SII->second.back().vreg != NewVReg) {
997            SII->second.push_back(SRInfo(index, NewVReg, true));
998          } else if ((int)index > SII->second.back().index) {
999            // If there is an earlier def and this is a two-address
1000            // instruction, then it's not possible to fold the store (which
1001            // would also fold the load).
1002            SRInfo &Info = SII->second.back();
1003            Info.index = index;
1004            Info.canFold = !HasUse;
1005          }
1006          SpillMBBs.set(MBBId);
1007        }
1008      }
1009    }
1010
1011    if (HasUse) {
1012      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1013        SpillIdxes.find(MBBId);
1014      if (SII != SpillIdxes.end() &&
1015          SII->second.back().vreg == NewVReg &&
1016          (int)index > SII->second.back().index)
1017        // Use(s) follow the last def, so it's not safe to fold the spill.
1018        SII->second.back().canFold = false;
1019      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1020        RestoreIdxes.find(MBBId);
1021      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1022        // If we are splitting live intervals, only fold if it's the first
1023        // use and there isn't another use later in the MBB.
1024        RII->second.back().canFold = false;
1025      else if (IsNew) {
1026        // Only need a reload if there isn't an earlier def / use.
1027        if (RII == RestoreIdxes.end()) {
1028          std::vector<SRInfo> Infos;
1029          Infos.push_back(SRInfo(index, NewVReg, true));
1030          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1031        } else {
1032          RII->second.push_back(SRInfo(index, NewVReg, true));
1033        }
1034        RestoreMBBs.set(MBBId);
1035      }
1036    }
1037
1038    // Update spill weight.
1039    unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
1040    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1041  }
1042}
1043
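/// alsoFoldARestore - Return true if a restore that is still marked as
/// foldable is recorded at the given index for vreg vr in the MBB with the
/// given Id.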
1044bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1045                        BitVector &RestoreMBBs,
1046                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1047  if (!RestoreMBBs[Id])
1048    return false;
1049  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1050  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1051    if (Restores[i].index == index &&
1052        Restores[i].vreg == vr &&
1053        Restores[i].canFold)
1054      return true;
1055  return false;
1056}
1057
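/// eraseRestoreInfo - Invalidate the restore recorded at the given index in
/// the MBB with the given Id (its index is set to -1) so that no reload is
/// issued for it.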
1058void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1059                        BitVector &RestoreMBBs,
1060                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1061  if (!RestoreMBBs[Id])
1062    return;
1063  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1064  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1065    if (Restores[i].index == index && Restores[i].vreg)
1066      Restores[i].index = -1;
1067}
1068
1069
1070std::vector<LiveInterval*> LiveIntervals::
1071addIntervalsForSpills(const LiveInterval &li,
1072                      const LoopInfo *loopInfo, VirtRegMap &vrm) {
1073  // Since this is called after the analysis is done, we don't know whether
1074  // LiveVariables is still available.
1075  lv_ = getAnalysisToUpdate<LiveVariables>();
1076
1077  assert(li.weight != HUGE_VALF &&
1078         "attempt to spill already spilled interval!");
1079
1080  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1081  li.print(DOUT, mri_);
1082  DOUT << '\n';
1083
1084  // Each bit specifies whether a spill is required in the corresponding MBB.
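  // SpillIdxes / RestoreIdxes record, per MBB number, the instruction indices
  // at which a spill / restore of a new vreg may have to be inserted;
  // MBBVRegsMap remembers the new vreg currently assigned to each MBB so that
  // other uses in the same block can reuse it when splitting.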
1085  BitVector SpillMBBs(mf_->getNumBlockIDs());
1086  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1087  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1088  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1089  std::map<unsigned,unsigned> MBBVRegsMap;
1090  std::vector<LiveInterval*> NewLIs;
1091  SSARegMap *RegMap = mf_->getSSARegMap();
1092  const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
1093
1094  unsigned NumValNums = li.getNumValNums();
1095  SmallVector<MachineInstr*, 4> ReMatDefs;
1096  ReMatDefs.resize(NumValNums, NULL);
1097  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1098  ReMatOrigDefs.resize(NumValNums, NULL);
1099  SmallVector<int, 4> ReMatIds;
1100  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1101  BitVector ReMatDelete(NumValNums);
1102  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1103
1104  // Spilling a split live interval. It cannot be split any further. Also, it's
1105  // guaranteed to be a single val# / range interval.
1106  if (vrm.getPreSplitReg(li.reg)) {
1107    vrm.setIsSplitFromReg(li.reg, 0);
1108    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1109    Slot = vrm.getStackSlot(li.reg);
1110    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1111    MachineInstr *ReMatDefMI = DefIsReMat ?
1112      vrm.getReMaterializedMI(li.reg) : NULL;
1113    int LdSlot = 0;
1114    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1115    bool isLoad = isLoadSS ||
1116      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
1117    bool IsFirstRange = true;
1118    for (LiveInterval::Ranges::const_iterator
1119           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1120      // If this is a split live interval with multiple ranges, it means there
1121      // are two-address instructions that re-defined the value. Only the
1122      // first def can be rematerialized!
1123      if (IsFirstRange) {
1124        // Note ReMatOrigDefMI has already been deleted.
1125        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1126                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1127                             false, vrm, RegMap, rc, ReMatIds, loopInfo,
1128                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1129                             MBBVRegsMap, NewLIs);
1130      } else {
1131        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1132                             Slot, 0, false, false, false,
1133                             false, vrm, RegMap, rc, ReMatIds, loopInfo,
1134                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1135                             MBBVRegsMap, NewLIs);
1136      }
1137      IsFirstRange = false;
1138    }
1139    return NewLIs;
1140  }
1141
1142  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1143  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1144    TrySplit = false;
1145  if (TrySplit)
1146    ++numSplits;
1147  bool NeedStackSlot = false;
1148  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1149       i != e; ++i) {
1150    const VNInfo *VNI = *i;
1151    unsigned VN = VNI->id;
1152    unsigned DefIdx = VNI->def;
1153    if (DefIdx == ~1U)
1154      continue; // Dead val#.
1155    // Is the def for the val# rematerializable?
1156    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1157      ? 0 : getInstructionFromIndex(DefIdx);
1158    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI)) {
1159      // Remember how to remat the def of this val#.
1160      ReMatOrigDefs[VN] = ReMatDefMI;
1161      // Original def may be modified so we have to make a copy here. vrm must
1162      // delete these!
1163      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1164      vrm.setVirtIsReMaterialized(li.reg, ReMatDefMI);
1165
1166      bool CanDelete = true;
1167      if (VNI->hasPHIKill) {
1168        // A kill is a phi node, so not all of its uses can be rematerialized.
1169        // It must not be deleted.
1170        CanDelete = false;
1171        // Need a stack slot if there is any live range where uses cannot be
1172        // rematerialized.
1173        NeedStackSlot = true;
1174      }
1175      if (CanDelete)
1176        ReMatDelete.set(VN);
1177    } else {
1178      // Need a stack slot if there is any live range where uses cannot be
1179      // rematerialized.
1180      NeedStackSlot = true;
1181    }
1182  }
1183
1184  // One stack slot per live interval.
1185  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1186    Slot = vrm.assignVirt2StackSlot(li.reg);
1187
1188  // Create new intervals and rewrite defs and uses.
1189  for (LiveInterval::Ranges::const_iterator
1190         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1191    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1192    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1193    bool DefIsReMat = ReMatDefMI != NULL;
1194    bool CanDelete = ReMatDelete[I->valno->id];
1195    int LdSlot = 0;
1196    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1197    bool isLoad = isLoadSS ||
1198      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
1199    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1200                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1201                               CanDelete, vrm, RegMap, rc, ReMatIds, loopInfo,
1202                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1203                               MBBVRegsMap, NewLIs);
1204  }
1205
1206  // Insert spills / restores if we are splitting.
1207  if (!TrySplit)
1208    return NewLIs;
1209
1210  if (NeedStackSlot) {
1211    int Id = SpillMBBs.find_first();
1212    while (Id != -1) {
1213      std::vector<SRInfo> &spills = SpillIdxes[Id];
1214      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1215        int index = spills[i].index;
1216        unsigned VReg = spills[i].vreg;
1217        bool isReMat = vrm.isReMaterialized(VReg);
1218        MachineInstr *MI = getInstructionFromIndex(index);
1219        int OpIdx = -1;
1220        unsigned NumUses = 0;
1221        if (spills[i].canFold) {
1222          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1223            MachineOperand &MO = MI->getOperand(j);
1224            if (!MO.isRegister() || MO.getReg() != VReg)
1225              continue;
1226            if (MO.isDef()) {
1227              OpIdx = (int)j;
1228              continue;
1229            }
1230            // Can't fold if it's two-address code and the use isn't the
1231            // first and only use.
1232            if (isReMat ||
1233                (NumUses == 0 && !alsoFoldARestore(Id, index, VReg, RestoreMBBs,
1234                                                   RestoreIdxes))) {
1235              OpIdx = -1;
1236              break;
1237            }
1238            ++NumUses;
1239          }
1240        }
1241        // Fold the store into the def if possible.
1242        bool Folded = false;
1243        if (OpIdx != -1) {
1244          if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, NumUses,
1245                                   true, Slot, VReg)) {
1246            if (NumUses)
1247              // Folded a two-address instruction, do not issue a load.
1248              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1249            Folded = true;
1250          }
1251        }
1252
1253        // Else tell the spiller to issue a store for us.
1254        if (!Folded)
1255          vrm.addSpillPoint(VReg, MI);
1256      }
1257      Id = SpillMBBs.find_next(Id);
1258    }
1259  }
1260
1261  int Id = RestoreMBBs.find_first();
1262  while (Id != -1) {
1263    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1264    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1265      int index = restores[i].index;
1266      if (index == -1)
1267        continue;
1268      unsigned VReg = restores[i].vreg;
1269      MachineInstr *MI = getInstructionFromIndex(index);
1270      unsigned NumUses = 0;
1271      int OpIdx = -1;
1272      if (restores[i].canFold) {
1273        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1274          MachineOperand &MO = MI->getOperand(j);
1275          if (!MO.isRegister() || MO.getReg() != VReg)
1276            continue;
1277          if (MO.isDef()) {
1278            // Can't fold if it's two-address code and it hasn't already
1279            // been folded.
1280            OpIdx = -1;
1281            break;
1282          }
1283          if (NumUses == 0)
1284            // Use the first use index.
1285            OpIdx = (int)j;
1286          ++NumUses;
1287        }
1288      }
1289
1290      // Fold the load into the use if possible.
1291      bool Folded = false;
1292      if (OpIdx != -1) {
1293        if (vrm.isReMaterialized(VReg)) {
1294          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1295          int LdSlot = 0;
1296          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1297          // If the rematerializable def is a load, also try to fold it.
1298          if (isLoadSS ||
1299              (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
1300            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
1301                                          NumUses, isLoadSS, LdSlot, VReg);
1302        } else
1303          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, NumUses,
1304                                        true, Slot, VReg);
1305      }
1306      // If folding is not possible / failed, then tell the spiller to issue a
1307      // load / rematerialization for us.
1308      if (!Folded)
1309        vrm.addRestorePoint(VReg, MI);
1310    }
1311    Id = RestoreMBBs.find_next(Id);
1312  }
1313
1314  // Finalize spill weights.
1315  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1316    NewLIs[i]->weight /= NewLIs[i]->getSize();
1317
1318  return NewLIs;
1319}
1320