LiveIntervalAnalysis.cpp revision ed17a89e7ae98a53f1298a2b6fcdb80106a5cfb1
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by the LLVM research group and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/LoopInfo.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/Passes.h"
27#include "llvm/CodeGen/SSARegMap.h"
28#include "llvm/Target/MRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
39namespace {
40  // Hidden options for help debugging.
41  cl::opt<bool> DisableReMat("disable-rematerialization",
42                              cl::init(false), cl::Hidden);
43
44  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45                          cl::init(false), cl::Hidden);
46  cl::opt<int> SplitLimit("split-limit",
47                          cl::init(-1), cl::Hidden);
48}
49
50STATISTIC(numIntervals, "Number of original intervals");
51STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
53STATISTIC(numSplits   , "Number of intervals split");
54
55char LiveIntervals::ID = 0;
56namespace {
57  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
58}
59
60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61  AU.addPreserved<LiveVariables>();
62  AU.addRequired<LiveVariables>();
63  AU.addPreservedID(PHIEliminationID);
64  AU.addRequiredID(PHIEliminationID);
65  AU.addRequiredID(TwoAddressInstructionPassID);
66  MachineFunctionPass::getAnalysisUsage(AU);
67}
68
69void LiveIntervals::releaseMemory() {
70  Idx2MBBMap.clear();
71  mi2iMap_.clear();
72  i2miMap_.clear();
73  r2iMap_.clear();
74  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
75  VNInfoAllocator.Reset();
76  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
77    delete ClonedMIs[i];
78}
79
80namespace llvm {
81  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
82    return V < IM.first;
83  }
84
85  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
86    return IM.first < V;
87  }
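  // These asymmetric operator< overloads let std::lower_bound compare a bare
  // instruction index directly against IdxMBBPair entries; findLiveInMBBs and
  // intervalIsInOneMBB below rely on this when searching Idx2MBBMap.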
88
89  struct Idx2MBBCompare {
90    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
91      return LHS.first < RHS.first;
92    }
93  };
94}
95
96/// runOnMachineFunction - Register allocate the whole function
97///
98bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
99  mf_ = &fn;
100  tm_ = &fn.getTarget();
101  mri_ = tm_->getRegisterInfo();
102  tii_ = tm_->getInstrInfo();
103  lv_ = &getAnalysis<LiveVariables>();
104  allocatableRegs_ = mri_->getAllocatableSet(fn);
105
106  // Number MachineInstrs and MachineBasicBlocks.
107  // Initialize MBB indexes to a sentinel.
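  // Each instruction is allotted InstrSlots::NUM consecutive index values, so
  // that the load / use / def / store points of a single instruction map to
  // distinct indices (see getLoadIndex / getUseIndex / getDefIndex /
  // getStoreIndex).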
108  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
109
110  unsigned MIIndex = 0;
111  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
112       MBB != E; ++MBB) {
113    unsigned StartIdx = MIIndex;
114
115    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
116         I != E; ++I) {
117      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
118      assert(inserted && "multiple MachineInstr -> index mappings");
119      i2miMap_.push_back(I);
120      MIIndex += InstrSlots::NUM;
121    }
122
123    // Set the MBB2IdxMap entry for this MBB.
124    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
125    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
126  }
127  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
128
129  computeIntervals();
130
131  numIntervals += getNumIntervals();
132
133  DOUT << "********** INTERVALS **********\n";
134  for (iterator I = begin(), E = end(); I != E; ++I) {
135    I->second.print(DOUT, mri_);
136    DOUT << "\n";
137  }
138
139  numIntervalsAfter += getNumIntervals();
140  DEBUG(dump());
141  return true;
142}
143
144/// print - Implement the dump method.
145void LiveIntervals::print(std::ostream &O, const Module* ) const {
146  O << "********** INTERVALS **********\n";
147  for (const_iterator I = begin(), E = end(); I != E; ++I) {
148    I->second.print(DOUT, mri_);
149    DOUT << "\n";
150  }
151
152  O << "********** MACHINEINSTRS **********\n";
153  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
154       mbbi != mbbe; ++mbbi) {
155    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
156    for (MachineBasicBlock::iterator mii = mbbi->begin(),
157           mie = mbbi->end(); mii != mie; ++mii) {
158      O << getInstructionIndex(mii) << '\t' << *mii;
159    }
160  }
161}
162
163/// conflictsWithPhysRegDef - Returns true if the specified register
164/// is defined at any point during the specified interval.
165bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
166                                            VirtRegMap &vrm, unsigned reg) {
167  for (LiveInterval::Ranges::const_iterator
168         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
169    for (unsigned index = getBaseIndex(I->start),
170           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
171         index += InstrSlots::NUM) {
172      // skip deleted instructions
173      while (index != end && !getInstructionFromIndex(index))
174        index += InstrSlots::NUM;
175      if (index == end) break;
176
177      MachineInstr *MI = getInstructionFromIndex(index);
178      unsigned SrcReg, DstReg;
179      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
180        if (SrcReg == li.reg || DstReg == li.reg)
181          continue;
182      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
183        MachineOperand& mop = MI->getOperand(i);
184        if (!mop.isRegister())
185          continue;
186        unsigned PhysReg = mop.getReg();
187        if (PhysReg == 0 || PhysReg == li.reg)
188          continue;
189        if (MRegisterInfo::isVirtualRegister(PhysReg)) {
190          if (!vrm.hasPhys(PhysReg))
191            continue;
192          PhysReg = vrm.getPhys(PhysReg);
193        }
194        if (PhysReg && mri_->regsOverlap(PhysReg, reg))
195          return true;
196      }
197    }
198  }
199
200  return false;
201}
202
203void LiveIntervals::printRegName(unsigned reg) const {
204  if (MRegisterInfo::isPhysicalRegister(reg))
205    cerr << mri_->getName(reg);
206  else
207    cerr << "%reg" << reg;
208}
209
210void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
211                                             MachineBasicBlock::iterator mi,
212                                             unsigned MIIdx,
213                                             LiveInterval &interval) {
214  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
215  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
216
217  // Virtual registers may be defined multiple times (due to phi
218  // elimination and 2-addr elimination).  Much of what we do only has to be
219  // done once for the vreg.  We use an empty interval to detect the first
220  // time we see a vreg.
221  if (interval.empty()) {
222    // Get the Idx of the defining instructions.
223    unsigned defIndex = getDefIndex(MIIdx);
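    // Create the first value number for this interval. When the def is a copy
    // (or an EXTRACT_SUBREG), remember its source register in the val#; a
    // source of 0 means the def is not a copy.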
224    VNInfo *ValNo;
225    unsigned SrcReg, DstReg;
226    if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
227      ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
228    else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
229      ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
230                                    VNInfoAllocator);
231    else
232      ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
233
234    assert(ValNo->id == 0 && "First value in interval is not 0?");
235
236    // Loop over all of the blocks that the vreg is defined in.  There are
237    // two cases we have to handle here.  The most common case is a vreg
238    // whose lifetime is contained within a basic block.  In this case there
239    // will be a single kill, in MBB, which comes after the definition.
240    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
241      // FIXME: what about dead vars?
242      unsigned killIdx;
243      if (vi.Kills[0] != mi)
244        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
245      else
246        killIdx = defIndex+1;
247
248      // If the kill happens after the definition, we have an intra-block
249      // live range.
250      if (killIdx > defIndex) {
251        assert(vi.AliveBlocks.none() &&
252               "Shouldn't be alive across any blocks!");
253        LiveRange LR(defIndex, killIdx, ValNo);
254        interval.addRange(LR);
255        DOUT << " +" << LR << "\n";
256        interval.addKill(ValNo, killIdx);
257        return;
258      }
259    }
260
261    // The other case we handle is when a virtual register lives to the end
262    // of the defining block, potentially live across some blocks, then is
263    // live into some number of blocks, but gets killed.  Start by adding a
264    // range that goes from this definition to the end of the defining block.
265    LiveRange NewLR(defIndex,
266                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
267                    ValNo);
268    DOUT << " +" << NewLR;
269    interval.addRange(NewLR);
270
271    // Iterate over all of the blocks that the variable is completely
272    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
273    // live interval.
274    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
275      if (vi.AliveBlocks[i]) {
276        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
277        if (!MBB->empty()) {
278          LiveRange LR(getMBBStartIdx(i),
279                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
280                       ValNo);
281          interval.addRange(LR);
282          DOUT << " +" << LR;
283        }
284      }
285    }
286
287    // Finally, this virtual register is live from the start of any killing
288    // block to the 'use' slot of the killing instruction.
289    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
290      MachineInstr *Kill = vi.Kills[i];
291      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
292      LiveRange LR(getMBBStartIdx(Kill->getParent()),
293                   killIdx, ValNo);
294      interval.addRange(LR);
295      interval.addKill(ValNo, killIdx);
296      DOUT << " +" << LR;
297    }
298
299  } else {
300    // If this is the second time we see a virtual register definition, it
301    // must be due to phi elimination or two addr elimination.  If this is
302    // the result of two address elimination, then the vreg is one of the
303    // def-and-use register operands.
304    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
305      // If this is a two-address definition, then we have already processed
306      // the live range.  The only problem is that we didn't realize there
307      // are actually two values in the live interval.  Because of this we
308      // need to take the live range that defines this register and split it
309      // into two values.
310      unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst));
311      unsigned RedefIndex = getDefIndex(MIIdx);
312
313      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
314      VNInfo *OldValNo = OldLR->valno;
315      unsigned OldEnd = OldLR->end;
316
317      // Delete the initial value, which should be short and continuous,
318      // because the 2-addr copy must be in the same MBB as the redef.
319      interval.removeRange(DefIndex, RedefIndex);
320
321      // Two-address vregs should always only be redefined once.  This means
322      // that at this point, there should be exactly one value number in it.
323      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
324
325      // The new value number (#1) is defined by the instruction we claimed
326      // defined value #0.
327      VNInfo *ValNo = interval.getNextValue(0, 0, VNInfoAllocator);
328      interval.copyValNumInfo(ValNo, OldValNo);
329
330      // Value#0 is now defined by the 2-addr instruction.
331      OldValNo->def = RedefIndex;
332      OldValNo->reg = 0;
333
334      // Add the new live interval which replaces the range for the input copy.
335      LiveRange LR(DefIndex, RedefIndex, ValNo);
336      DOUT << " replace range with " << LR;
337      interval.addRange(LR);
338      interval.addKill(ValNo, RedefIndex);
339      interval.removeKills(ValNo, RedefIndex, OldEnd);
340
341      // If this redefinition is dead, we need to add a dummy unit live
342      // range covering the def slot.
343      if (lv_->RegisterDefIsDead(mi, interval.reg))
344        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
345
346      DOUT << " RESULT: ";
347      interval.print(DOUT, mri_);
348
349    } else {
350      // Otherwise, this must be because of phi elimination.  If this is the
351      // first redefinition of the vreg that we have seen, go back and change
352      // the live range in the PHI block to be a different value number.
353      if (interval.containsOneValue()) {
354        assert(vi.Kills.size() == 1 &&
355               "PHI elimination vreg should have one kill, the PHI itself!");
356
357        // Remove the old range that we now know has an incorrect number.
358        VNInfo *VNI = interval.getValNumInfo(0);
359        MachineInstr *Killer = vi.Kills[0];
360        unsigned Start = getMBBStartIdx(Killer->getParent());
361        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
362        DOUT << " Removing [" << Start << "," << End << "] from: ";
363        interval.print(DOUT, mri_); DOUT << "\n";
364        interval.removeRange(Start, End);
365        interval.addKill(VNI, Start);
366        VNI->hasPHIKill = true;
367        DOUT << " RESULT: "; interval.print(DOUT, mri_);
368
369        // Replace the interval with one of a NEW value number.  Note that this
370        // value number isn't actually defined by an instruction, weird huh? :)
371        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
372        DOUT << " replace range with " << LR;
373        interval.addRange(LR);
374        interval.addKill(LR.valno, End);
375        DOUT << " RESULT: "; interval.print(DOUT, mri_);
376      }
377
378      // In the case of PHI elimination, each variable definition is only
379      // live until the end of the block.  We've already taken care of the
380      // rest of the live range.
381      unsigned defIndex = getDefIndex(MIIdx);
382
383      VNInfo *ValNo;
384      unsigned SrcReg, DstReg;
385      if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
386        ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
387      else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
388        ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
389                                      VNInfoAllocator);
390      else
391        ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
392
393      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
394      LiveRange LR(defIndex, killIndex, ValNo);
395      interval.addRange(LR);
396      interval.addKill(ValNo, killIndex);
397      ValNo->hasPHIKill = true;
398      DOUT << " +" << LR;
399    }
400  }
401
402  DOUT << '\n';
403}
404
405void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
406                                              MachineBasicBlock::iterator mi,
407                                              unsigned MIIdx,
408                                              LiveInterval &interval,
409                                              unsigned SrcReg) {
410  // A physical register cannot be live across basic blocks, so its
411  // lifetime must end somewhere in its defining basic block.
412  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
413
414  unsigned baseIndex = MIIdx;
415  unsigned start = getDefIndex(baseIndex);
416  unsigned end = start;
417
418  // If it is not used after definition, it is considered dead at
419  // the instruction defining it. Hence its interval is:
420  // [defSlot(def), defSlot(def)+1)
421  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
422    DOUT << " dead";
423    end = getDefIndex(start) + 1;
424    goto exit;
425  }
426
427  // If it is not dead on definition, it must be killed by a
428  // subsequent instruction. Hence its interval is:
429  // [defSlot(def), useSlot(kill)+1)
430  while (++mi != MBB->end()) {
431    baseIndex += InstrSlots::NUM;
432    if (lv_->KillsRegister(mi, interval.reg)) {
433      DOUT << " killed";
434      end = getUseIndex(baseIndex) + 1;
435      goto exit;
436    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
437      // Another instruction redefines the register before it is ever read.
438      // Then the register is essentially dead at the instruction that defines
439      // it. Hence its interval is:
440      // [defSlot(def), defSlot(def)+1)
441      DOUT << " dead";
442      end = getDefIndex(start) + 1;
443      goto exit;
444    }
445  }
446
447  // The only case where we should have a dead physreg here without a killing
448  // instruction, or an instruction where we know it's dead, is if it is
449  // live-in to the function and never used.
450  assert(!SrcReg && "physreg was not killed in defining block!");
451  end = getDefIndex(start) + 1;  // It's dead.
452
453exit:
454  assert(start < end && "did not find end of interval?");
455
456  // Already exists? Extend old live interval.
457  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
458  VNInfo *ValNo = (OldLR != interval.end())
459    ? OldLR->valno : interval.getNextValue(start, SrcReg, VNInfoAllocator);
460  LiveRange LR(start, end, ValNo);
461  interval.addRange(LR);
462  interval.addKill(LR.valno, end);
463  DOUT << " +" << LR << '\n';
464}
465
466void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
467                                      MachineBasicBlock::iterator MI,
468                                      unsigned MIIdx,
469                                      unsigned reg) {
470  if (MRegisterInfo::isVirtualRegister(reg))
471    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
472  else if (allocatableRegs_[reg]) {
473    unsigned SrcReg, DstReg;
474    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
475      SrcReg = MI->getOperand(1).getReg();
476    else if (!tii_->isMoveInstr(*MI, SrcReg, DstReg))
477      SrcReg = 0;
478    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), SrcReg);
479    // Def of a register also defines its sub-registers.
480    for (const unsigned* AS = mri_->getSubRegisters(reg); *AS; ++AS)
481      // Avoid processing some defs more than once.
482      if (!MI->findRegisterDefOperand(*AS))
483        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
484  }
485}
486
487void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
488                                         unsigned MIIdx,
489                                         LiveInterval &interval, bool isAlias) {
490  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
491
492  // Look for kills. If the register reaches a def before it's killed, then it
493  // shouldn't be considered a live-in.
494  MachineBasicBlock::iterator mi = MBB->begin();
495  unsigned baseIndex = MIIdx;
496  unsigned start = baseIndex;
497  unsigned end = start;
498  while (mi != MBB->end()) {
499    if (lv_->KillsRegister(mi, interval.reg)) {
500      DOUT << " killed";
501      end = getUseIndex(baseIndex) + 1;
502      goto exit;
503    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
504      // Another instruction redefines the register before it is ever read.
505      // Then the register is essentially dead at the instruction that defines
506      // it. Hence its interval is:
507      // [defSlot(def), defSlot(def)+1)
508      DOUT << " dead";
509      end = getDefIndex(start) + 1;
510      goto exit;
511    }
512
513    baseIndex += InstrSlots::NUM;
514    ++mi;
515  }
516
517exit:
518  // Live-in register might not be used at all.
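  // If we fell through without seeing a kill or a def (end == MIIdx), the
  // register is either dead on entry (when it is only an alias of a live-in)
  // or live through the entire block.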
519  if (end == MIIdx) {
520    if (isAlias) {
521      DOUT << " dead";
522      end = getDefIndex(MIIdx) + 1;
523    } else {
524      DOUT << " live through";
525      end = baseIndex;
526    }
527  }
528
529  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
530  interval.addRange(LR);
531  interval.addKill(LR.valno, end);
532  DOUT << " +" << LR << '\n';
533}
534
535/// computeIntervals - Compute the live intervals for virtual
536/// registers. For some ordering of the machine instructions [1,N], a
537/// live interval is an interval [i, j), where 1 <= i <= j < N, for
538/// which a variable is live.
539void LiveIntervals::computeIntervals() {
540  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
541       << "********** Function: "
542       << ((Value*)mf_->getFunction())->getName() << '\n';
543  // Track the index of the current machine instr.
544  unsigned MIIndex = 0;
545  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
546       MBBI != E; ++MBBI) {
547    MachineBasicBlock *MBB = MBBI;
548    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
549
550    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
551
552    // Create intervals for live-ins to this BB first.
553    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
554           LE = MBB->livein_end(); LI != LE; ++LI) {
555      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
556      // Multiple live-ins can alias the same register.
557      for (const unsigned* AS = mri_->getSubRegisters(*LI); *AS; ++AS)
558        if (!hasInterval(*AS))
559          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
560                               true);
561    }
562
563    for (; MI != miEnd; ++MI) {
564      DOUT << MIIndex << "\t" << *MI;
565
566      // Handle defs.
567      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
568        MachineOperand &MO = MI->getOperand(i);
569        // handle register defs - build intervals
570        if (MO.isRegister() && MO.getReg() && MO.isDef())
571          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
572      }
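      // Uses are not visited explicitly here; ranges are extended to uses via
      // the kill and live-block information computed by LiveVariables.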
573
574      MIIndex += InstrSlots::NUM;
575    }
576  }
577}
578
579bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
580                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
581  std::vector<IdxMBBPair>::const_iterator I =
582    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
583
584  bool ResVal = false;
585  while (I != Idx2MBBMap.end()) {
586    if (LR.end <= I->first)
587      break;
588    MBBs.push_back(I->second);
589    ResVal = true;
590    ++I;
591  }
592  return ResVal;
593}
594
595
596LiveInterval LiveIntervals::createInterval(unsigned reg) {
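  // Physical register intervals get an effectively infinite weight so the
  // allocator never tries to spill them; virtual register intervals start at
  // zero and accumulate weight as their defs / uses are processed.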
597  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
598                       HUGE_VALF : 0.0F;
599  return LiveInterval(reg, Weight);
600}
601
602
603//===----------------------------------------------------------------------===//
604// Register allocator hooks.
605//
606
607/// isReMaterializable - Returns true if the definition MI of the specified
608/// val# of the specified interval is re-materializable.
609bool LiveIntervals::isReMaterializable(const LiveInterval &li,
610                                       const VNInfo *ValNo, MachineInstr *MI) {
611  if (DisableReMat)
612    return false;
613
614  if (tii_->isTriviallyReMaterializable(MI))
615    return true;
616
617  int FrameIdx = 0;
618  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
619      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
620    return false;
621
622  // This is a load from a fixed stack slot. It can be rematerialized unless it's
623  // re-defined by a two-address instruction.
624  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
625       i != e; ++i) {
626    const VNInfo *VNI = *i;
627    if (VNI == ValNo)
628      continue;
629    unsigned DefIdx = VNI->def;
630    if (DefIdx == ~1U)
631      continue; // Dead val#.
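    // A def index of ~0U means the value has no single defining instruction
    // (e.g. the value numbers created for PHI-joined ranges), so there is no
    // MachineInstr to inspect for it.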
632    MachineInstr *DefMI = (DefIdx == ~0u)
633      ? NULL : getInstructionFromIndex(DefIdx);
634    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg))
635      return false;
636  }
637  return true;
638}
639
640/// tryFoldMemoryOperand - Attempts to fold either a spill / restore (to / from
641/// a stack slot) or a rematerialized load into the ith operand of the specified
642/// MI. If it is successful, MI is updated with the newly created MI and the
643/// function returns true.
644bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
645                                         VirtRegMap &vrm, MachineInstr *DefMI,
646                                         unsigned InstrIdx,
647                                         SmallVector<unsigned, 2> &Ops,
648                                         bool isSS, int Slot, unsigned Reg) {
649  unsigned MRInfo = 0;
650  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
651  SmallVector<unsigned, 2> FoldOps;
652  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
653    unsigned OpIdx = Ops[i];
654    // FIXME: fold subreg use.
655    if (MI->getOperand(OpIdx).getSubReg())
656      return false;
657    if (MI->getOperand(OpIdx).isDef())
658      MRInfo |= (unsigned)VirtRegMap::isMod;
659    else {
660      // Filter out two-address use operand(s).
661      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
662        MRInfo = VirtRegMap::isModRef;
663        continue;
664      }
665      MRInfo |= (unsigned)VirtRegMap::isRef;
666    }
667    FoldOps.push_back(OpIdx);
668  }
669
670  MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
671                           : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
672  if (fmi) {
673    // Attempt to fold the memory reference into the instruction. If
674    // we can do this, we don't need to insert spill code.
675    if (lv_)
676      lv_->instructionChanged(MI, fmi);
677    else
678      LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
679    MachineBasicBlock &MBB = *MI->getParent();
680    if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
681      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
682    vrm.transferSpillPts(MI, fmi);
683    vrm.transferRestorePts(MI, fmi);
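    // Let fmi take over MI's slot in the instruction index maps so that
    // previously computed indices still resolve to the folded instruction.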
684    mi2iMap_.erase(MI);
685    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
686    mi2iMap_[fmi] = InstrIdx;
687    MI = MBB.insert(MBB.erase(MI), fmi);
688    ++numFolds;
689    return true;
690  }
691  return false;
692}
693
694/// canFoldMemoryOperand - Returns true if the specified load / store
695/// folding is possible.
696bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
697                                         SmallVector<unsigned, 2> &Ops) const {
698  SmallVector<unsigned, 2> FoldOps;
699  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
700    unsigned OpIdx = Ops[i];
701    // FIXME: fold subreg use.
702    if (MI->getOperand(OpIdx).getSubReg())
703      return false;
704    FoldOps.push_back(OpIdx);
705  }
706
707  return mri_->canFoldMemoryOperand(MI, FoldOps);
708}
709
710bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
711  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
712  for (LiveInterval::Ranges::const_iterator
713         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
714    std::vector<IdxMBBPair>::const_iterator II =
715      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
716    if (II == Idx2MBBMap.end())
717      continue;
718    if (I->end > II->first)  // crossing a MBB.
719      return false;
720    MBBs.insert(II->second);
721    if (MBBs.size() > 1)
722      return false;
723  }
724  return true;
725}
726
727/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
728/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
729bool LiveIntervals::
730rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
731                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
732                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
733                 unsigned Slot, int LdSlot,
734                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
735                 VirtRegMap &vrm, SSARegMap *RegMap,
736                 const TargetRegisterClass* rc,
737                 SmallVector<int, 4> &ReMatIds,
738                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
739                 const LoopInfo *loopInfo,
740                 std::map<unsigned,unsigned> &MBBVRegsMap,
741                 std::vector<LiveInterval*> &NewLIs) {
742  bool CanFold = false;
743 RestartInstruction:
744  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
745    MachineOperand& mop = MI->getOperand(i);
746    if (!mop.isRegister())
747      continue;
748    unsigned Reg = mop.getReg();
749    unsigned RegI = Reg;
750    if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
751      continue;
752    if (Reg != li.reg)
753      continue;
754
755    bool TryFold = !DefIsReMat;
756    bool FoldSS = true; // Default behavior unless it's a remat.
757    int FoldSlot = Slot;
758    if (DefIsReMat) {
759      // If this is the rematerializable definition MI itself and
760      // all of its uses are rematerialized, simply delete it.
761      if (MI == ReMatOrigDefMI && CanDelete) {
762        DOUT << "\t\t\t\tErasing re-materializable def: ";
763        DOUT << MI << '\n';
764        RemoveMachineInstrFromMaps(MI);
765        vrm.RemoveMachineInstrFromMaps(MI);
766        MI->eraseFromParent();
767        break;
768      }
769
770      // If def for this use can't be rematerialized, then try folding.
771      // If def is rematerializable and it's a load, also try folding.
772      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
773      if (isLoad) {
774        // Try fold loads (from stack slot, constant pool, etc.) into uses.
775        FoldSS = isLoadSS;
776        FoldSlot = LdSlot;
777      }
778    }
779
780    // Scan all of the operands of this instruction rewriting operands
781    // to use NewVReg instead of li.reg as appropriate.  We do this for
782    // two reasons:
783    //
784    //   1. If the instr reads the same spilled vreg multiple times, we
785    //      want to reuse the NewVReg.
786    //   2. If the instr is a two-addr instruction, we are required to
787    //      keep the src/dst regs pinned.
788    //
789    // Keep track of whether we replace a use and/or def so that we can
790    // create the spill interval with the appropriate range.
791
792    HasUse = mop.isUse();
793    HasDef = mop.isDef();
794    SmallVector<unsigned, 2> Ops;
795    Ops.push_back(i);
796    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
797      const MachineOperand &MOj = MI->getOperand(j);
798      if (!MOj.isRegister())
799        continue;
800      unsigned RegJ = MOj.getReg();
801      if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
802        continue;
803      if (RegJ == RegI) {
804        Ops.push_back(j);
805        HasUse |= MOj.isUse();
806        HasDef |= MOj.isDef();
807      }
808    }
809
810    if (TryFold) {
811      // Do not fold load / store here if we are splitting. We'll find an
812      // optimal point to insert a load / store later.
813      if (!TrySplit) {
814        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
815                                 Ops, FoldSS, FoldSlot, Reg)) {
816          // Folding the load/store can completely change the instruction in
817          // unpredictable ways, rescan it from the beginning.
818          HasUse = false;
819          HasDef = false;
820          CanFold = false;
821          goto RestartInstruction;
822        }
823      } else {
824        CanFold = canFoldMemoryOperand(MI, Ops);
825      }
826    } else CanFold = false;
827
828    // Create a new virtual register for the spill interval.
829    bool CreatedNewVReg = false;
830    if (NewVReg == 0) {
831      NewVReg = RegMap->createVirtualRegister(rc);
832      vrm.grow();
833      CreatedNewVReg = true;
834    }
835    mop.setReg(NewVReg);
836
837    // Reuse NewVReg for other reads.
838    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
839      MI->getOperand(Ops[j]).setReg(NewVReg);
840
841    if (CreatedNewVReg) {
842      if (DefIsReMat) {
843        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
844        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
845          // Each valnum may have its own remat id.
846          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
847        } else {
848          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
849        }
850        if (!CanDelete || (HasUse && HasDef)) {
851          // If this is a two-addr instruction then its use operands are
852          // rematerializable but its def is not. It should be assigned a
853          // stack slot.
854          vrm.assignVirt2StackSlot(NewVReg, Slot);
855        }
856      } else {
857        vrm.assignVirt2StackSlot(NewVReg, Slot);
858      }
859    } else if (HasUse && HasDef &&
860               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
861      // If this interval hasn't been assigned a stack slot (because an earlier
862      // def is a deleted remat def), do it now.
863      assert(Slot != VirtRegMap::NO_STACK_SLOT);
864      vrm.assignVirt2StackSlot(NewVReg, Slot);
865    }
866
867    // Create a new register interval for this spill / remat.
868    LiveInterval &nI = getOrCreateInterval(NewVReg);
869    if (CreatedNewVReg) {
870      NewLIs.push_back(&nI);
871      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
872      if (TrySplit)
873        vrm.setIsSplitFromReg(NewVReg, li.reg);
874    }
875
876    if (HasUse) {
877      if (CreatedNewVReg) {
878        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
879                     nI.getNextValue(~0U, 0, VNInfoAllocator));
880        DOUT << " +" << LR;
881        nI.addRange(LR);
882      } else {
883        // Extend the split live interval to this def / use.
884        unsigned End = getUseIndex(index)+1;
885        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
886                     nI.getValNumInfo(nI.getNumValNums()-1));
887        DOUT << " +" << LR;
888        nI.addRange(LR);
889      }
890    }
891    if (HasDef) {
892      LiveRange LR(getDefIndex(index), getStoreIndex(index),
893                   nI.getNextValue(~0U, 0, VNInfoAllocator));
894      DOUT << " +" << LR;
895      nI.addRange(LR);
896    }
897
898    DOUT << "\t\t\t\tAdded new interval: ";
899    nI.print(DOUT, mri_);
900    DOUT << '\n';
901  }
902  return CanFold;
903}
904bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
905                                   const VNInfo *VNI,
906                                   MachineBasicBlock *MBB, unsigned Idx) const {
907  unsigned End = getMBBEndIdx(MBB);
908  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
909    unsigned KillIdx = VNI->kills[j];
910    if (KillIdx > Idx && KillIdx < End)
911      return true;
912  }
913  return false;
914}
915
916static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
917  const VNInfo *VNI = NULL;
918  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
919         e = li.vni_end(); i != e; ++i)
920    if ((*i)->def == DefIdx) {
921      VNI = *i;
922      break;
923    }
924  return VNI;
925}
926
927void LiveIntervals::
928rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
929                    LiveInterval::Ranges::const_iterator &I,
930                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
931                    unsigned Slot, int LdSlot,
932                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
933                    VirtRegMap &vrm, SSARegMap *RegMap,
934                    const TargetRegisterClass* rc,
935                    SmallVector<int, 4> &ReMatIds,
936                    const LoopInfo *loopInfo,
937                    BitVector &SpillMBBs,
938                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
939                    BitVector &RestoreMBBs,
940                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
941                    std::map<unsigned,unsigned> &MBBVRegsMap,
942                    std::vector<LiveInterval*> &NewLIs) {
943  bool AllCanFold = true;
944  unsigned NewVReg = 0;
945  unsigned index = getBaseIndex(I->start);
946  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
947  for (; index != end; index += InstrSlots::NUM) {
948    // skip deleted instructions
949    while (index != end && !getInstructionFromIndex(index))
950      index += InstrSlots::NUM;
951    if (index == end) break;
952
953    MachineInstr *MI = getInstructionFromIndex(index);
954    MachineBasicBlock *MBB = MI->getParent();
955    unsigned ThisVReg = 0;
956    if (TrySplit) {
957      std::map<unsigned,unsigned>::const_iterator NVI =
958        MBBVRegsMap.find(MBB->getNumber());
959      if (NVI != MBBVRegsMap.end()) {
960        ThisVReg = NVI->second;
961        // One common case:
962        // x = use
963        // ...
964        // ...
965        // def = ...
966        //     = use
967        // It's better to start a new interval to avoid artificially
968        // extending the new interval.
969        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
970        bool MIHasUse = false;
971        bool MIHasDef = false;
972        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
973          MachineOperand& mop = MI->getOperand(i);
974          if (!mop.isRegister() || mop.getReg() != li.reg)
975            continue;
976          if (mop.isUse())
977            MIHasUse = true;
978          else
979            MIHasDef = true;
980        }
981        if (MIHasDef && !MIHasUse) {
982          MBBVRegsMap.erase(MBB->getNumber());
983          ThisVReg = 0;
984        }
985      }
986    }
987
988    bool IsNew = ThisVReg == 0;
989    if (IsNew) {
990      // This ends the previous live interval. If all of its def / use
991      // can be folded, give it a low spill weight.
992      if (NewVReg && TrySplit && AllCanFold) {
993        LiveInterval &nI = getOrCreateInterval(NewVReg);
994        nI.weight /= 10.0F;
995      }
996      AllCanFold = true;
997    }
998    NewVReg = ThisVReg;
999
1000    bool HasDef = false;
1001    bool HasUse = false;
1002    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
1003                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1004                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1005                                CanDelete, vrm, RegMap, rc, ReMatIds, NewVReg,
1006                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
1007    if (!HasDef && !HasUse)
1008      continue;
1009
1010    AllCanFold &= CanFold;
1011
1012    // Update weight of spill interval.
1013    LiveInterval &nI = getOrCreateInterval(NewVReg);
1014    if (!TrySplit) {
1015      // The spill weight is now infinity as it cannot be spilled again.
1016      nI.weight = HUGE_VALF;
1017      continue;
1018    }
1019
1020    // Keep track of the last def and first use in each MBB.
1021    unsigned MBBId = MBB->getNumber();
1022    if (HasDef) {
1023      if (MI != ReMatOrigDefMI || !CanDelete) {
1024        bool HasKill = false;
1025        if (!HasUse)
1026          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1027        else {
1028          // If this is two-address code, then this index starts a new VNInfo.
1029          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1030          if (VNI)
1031            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1032        }
1033        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1034          SpillIdxes.find(MBBId);
1035        if (!HasKill) {
1036          if (SII == SpillIdxes.end()) {
1037            std::vector<SRInfo> S;
1038            S.push_back(SRInfo(index, NewVReg, true));
1039            SpillIdxes.insert(std::make_pair(MBBId, S));
1040          } else if (SII->second.back().vreg != NewVReg) {
1041            SII->second.push_back(SRInfo(index, NewVReg, true));
1042          } else if ((int)index > SII->second.back().index) {
1043            // If there is an earlier def and this is a two-address
1044            // instruction, then it's not possible to fold the store (which
1045            // would also fold the load).
1046            SRInfo &Info = SII->second.back();
1047            Info.index = index;
1048            Info.canFold = !HasUse;
1049          }
1050          SpillMBBs.set(MBBId);
1051        } else if (SII != SpillIdxes.end() &&
1052                   SII->second.back().vreg == NewVReg &&
1053                   (int)index > SII->second.back().index) {
1054          // There is an earlier def that's not killed (must be two-address).
1055          // The spill is no longer needed.
1056          SII->second.pop_back();
1057          if (SII->second.empty()) {
1058            SpillIdxes.erase(MBBId);
1059            SpillMBBs.reset(MBBId);
1060          }
1061        }
1062      }
1063    }
1064
1065    if (HasUse) {
1066      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1067        SpillIdxes.find(MBBId);
1068      if (SII != SpillIdxes.end() &&
1069          SII->second.back().vreg == NewVReg &&
1070          (int)index > SII->second.back().index)
1071        // There are use(s) following the last def, so it's not safe to fold the spill.
1072        SII->second.back().canFold = false;
1073      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1074        RestoreIdxes.find(MBBId);
1075      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1076        // If we are splitting live intervals, only fold if it's the first
1077        // use and there isn't another use later in the MBB.
1078        RII->second.back().canFold = false;
1079      else if (IsNew) {
1080        // Only need a reload if there isn't an earlier def / use.
1081        if (RII == RestoreIdxes.end()) {
1082          std::vector<SRInfo> Infos;
1083          Infos.push_back(SRInfo(index, NewVReg, true));
1084          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1085        } else {
1086          RII->second.push_back(SRInfo(index, NewVReg, true));
1087        }
1088        RestoreMBBs.set(MBBId);
1089      }
1090    }
1091
1092    // Update spill weight.
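    // getSpillWeight factors in the loop depth, so defs / uses inside deeper
    // loops make the new interval costlier to spill again.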
1093    unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
1094    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1095  }
1096
1097  if (NewVReg && TrySplit && AllCanFold) {
1098    // If all of its def / use can be folded, give it a low spill weight.
1099    LiveInterval &nI = getOrCreateInterval(NewVReg);
1100    nI.weight /= 10.0F;
1101  }
1102}
1103
1104bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1105                        BitVector &RestoreMBBs,
1106                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1107  if (!RestoreMBBs[Id])
1108    return false;
1109  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1110  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1111    if (Restores[i].index == index &&
1112        Restores[i].vreg == vr &&
1113        Restores[i].canFold)
1114      return true;
1115  return false;
1116}
1117
1118void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1119                        BitVector &RestoreMBBs,
1120                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1121  if (!RestoreMBBs[Id])
1122    return;
1123  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1124  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1125    if (Restores[i].index == index && Restores[i].vreg == vr)
1126      Restores[i].index = -1;
1127}
1128
1129
1130std::vector<LiveInterval*> LiveIntervals::
1131addIntervalsForSpills(const LiveInterval &li,
1132                      const LoopInfo *loopInfo, VirtRegMap &vrm) {
1133  // Since this is called after the analysis is done, we don't know if
1134  // LiveVariables is available.
1135  lv_ = getAnalysisToUpdate<LiveVariables>();
1136
1137  assert(li.weight != HUGE_VALF &&
1138         "attempt to spill already spilled interval!");
1139
1140  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1141  li.print(DOUT, mri_);
1142  DOUT << '\n';
1143
1144  // Each bit specifies whether a spill is required in the corresponding MBB.
1145  BitVector SpillMBBs(mf_->getNumBlockIDs());
1146  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1147  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1148  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1149  std::map<unsigned,unsigned> MBBVRegsMap;
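  // SpillIdxes / RestoreIdxes record, per basic block, the instruction index,
  // the new vreg, and whether the spill / restore can be folded (SRInfo).
  // MBBVRegsMap remembers the new vreg created for this interval in each block
  // so later uses in the same block reuse it rather than creating another.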
1150  std::vector<LiveInterval*> NewLIs;
1151  SSARegMap *RegMap = mf_->getSSARegMap();
1152  const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
1153
1154  unsigned NumValNums = li.getNumValNums();
1155  SmallVector<MachineInstr*, 4> ReMatDefs;
1156  ReMatDefs.resize(NumValNums, NULL);
1157  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1158  ReMatOrigDefs.resize(NumValNums, NULL);
1159  SmallVector<int, 4> ReMatIds;
1160  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1161  BitVector ReMatDelete(NumValNums);
1162  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1163
1164  // Spilling a split live interval. It cannot be split any further. Also,
1165  // it's guaranteed to be a single val# / range interval.
1166  if (vrm.getPreSplitReg(li.reg)) {
1167    vrm.setIsSplitFromReg(li.reg, 0);
1168    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1169    Slot = vrm.getStackSlot(li.reg);
1170    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1171    MachineInstr *ReMatDefMI = DefIsReMat ?
1172      vrm.getReMaterializedMI(li.reg) : NULL;
1173    int LdSlot = 0;
1174    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1175    bool isLoad = isLoadSS ||
1176      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
1177    bool IsFirstRange = true;
1178    for (LiveInterval::Ranges::const_iterator
1179           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1180      // If this is a split live interval with multiple ranges, it means there
1181      // are two-address instructions that re-defined the value. Only the
1182      // first def can be rematerialized!
1183      if (IsFirstRange) {
1184        // Note ReMatOrigDefMI has already been deleted.
1185        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1186                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1187                             false, vrm, RegMap, rc, ReMatIds, loopInfo,
1188                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1189                             MBBVRegsMap, NewLIs);
1190      } else {
1191        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1192                             Slot, 0, false, false, false,
1193                             false, vrm, RegMap, rc, ReMatIds, loopInfo,
1194                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1195                             MBBVRegsMap, NewLIs);
1196      }
1197      IsFirstRange = false;
1198    }
1199    return NewLIs;
1200  }
1201
1202  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1203  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1204    TrySplit = false;
1205  if (TrySplit)
1206    ++numSplits;
1207  bool NeedStackSlot = false;
1208  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1209       i != e; ++i) {
1210    const VNInfo *VNI = *i;
1211    unsigned VN = VNI->id;
1212    unsigned DefIdx = VNI->def;
1213    if (DefIdx == ~1U)
1214      continue; // Dead val#.
1215    // Is the def for the val# rematerializable?
1216    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1217      ? 0 : getInstructionFromIndex(DefIdx);
1218    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI)) {
1219      // Remember how to remat the def of this val#.
1220      ReMatOrigDefs[VN] = ReMatDefMI;
1221      // Original def may be modified so we have to make a copy here. vrm must
1222      // delete these!
1223      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1224      vrm.setVirtIsReMaterialized(li.reg, ReMatDefMI);
1225
1226      bool CanDelete = true;
1227      if (VNI->hasPHIKill) {
1228        // A kill is a phi node, so not all of its uses can be rematerialized.
1229        // It must not be deleted.
1230        CanDelete = false;
1231        // Need a stack slot if there is any live range where uses cannot be
1232        // rematerialized.
1233        NeedStackSlot = true;
1234      }
1235      if (CanDelete)
1236        ReMatDelete.set(VN);
1237    } else {
1238      // Need a stack slot if there is any live range where uses cannot be
1239      // rematerialized.
1240      NeedStackSlot = true;
1241    }
1242  }
1243
1244  // One stack slot per live interval.
1245  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1246    Slot = vrm.assignVirt2StackSlot(li.reg);
1247
1248  // Create new intervals and rewrite defs and uses.
1249  for (LiveInterval::Ranges::const_iterator
1250         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1251    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1252    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1253    bool DefIsReMat = ReMatDefMI != NULL;
1254    bool CanDelete = ReMatDelete[I->valno->id];
1255    int LdSlot = 0;
1256    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1257    bool isLoad = isLoadSS ||
1258      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
1259    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1260                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1261                               CanDelete, vrm, RegMap, rc, ReMatIds, loopInfo,
1262                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1263                               MBBVRegsMap, NewLIs);
1264  }
1265
1266  // Insert spills / restores if we are splitting.
1267  if (!TrySplit)
1268    return NewLIs;
1269
1270  SmallPtrSet<LiveInterval*, 4> AddedKill;
1271  SmallVector<unsigned, 2> Ops;
1272  if (NeedStackSlot) {
1273    int Id = SpillMBBs.find_first();
1274    while (Id != -1) {
1275      std::vector<SRInfo> &spills = SpillIdxes[Id];
1276      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1277        int index = spills[i].index;
1278        unsigned VReg = spills[i].vreg;
1279        LiveInterval &nI = getOrCreateInterval(VReg);
1280        bool isReMat = vrm.isReMaterialized(VReg);
1281        MachineInstr *MI = getInstructionFromIndex(index);
1282        bool CanFold = false;
1283        bool FoundUse = false;
1284        Ops.clear();
1285        if (spills[i].canFold) {
1286          CanFold = true;
1287          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1288            MachineOperand &MO = MI->getOperand(j);
1289            if (!MO.isRegister() || MO.getReg() != VReg)
1290              continue;
1291
1292            Ops.push_back(j);
1293            if (MO.isDef())
1294              continue;
1295            if (isReMat ||
1296                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1297                                                RestoreMBBs, RestoreIdxes))) {
1298              // MI has two-address uses of the same register. If the use
1299              // isn't the first and only use in the BB, then we can't fold
1300              // it. FIXME: Move this to rewriteInstructionsForSpills.
1301              CanFold = false;
1302              break;
1303            }
1304            FoundUse = true;
1305          }
1306        }
1307        // Fold the store into the def if possible.
1308        bool Folded = false;
1309        if (CanFold && !Ops.empty()) {
1310          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1311            Folded = true;
1312            if (FoundUse > 0)
1313              // Also folded uses, do not issue a load.
1314              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1315            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1316          }
1317        }
1318
1319        // Else tell the spiller to issue a spill.
1320        if (!Folded) {
1321          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1322          bool isKill = LR->end == getStoreIndex(index);
1323          vrm.addSpillPoint(VReg, isKill, MI);
1324          if (isKill)
1325            AddedKill.insert(&nI);
1326        }
1327      }
1328      Id = SpillMBBs.find_next(Id);
1329    }
1330  }
1331
1332  int Id = RestoreMBBs.find_first();
1333  while (Id != -1) {
1334    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1335    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1336      int index = restores[i].index;
1337      if (index == -1)
1338        continue;
1339      unsigned VReg = restores[i].vreg;
1340      LiveInterval &nI = getOrCreateInterval(VReg);
1341      MachineInstr *MI = getInstructionFromIndex(index);
1342      bool CanFold = false;
1343      Ops.clear();
1344      if (restores[i].canFold) {
1345        CanFold = true;
1346        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1347          MachineOperand &MO = MI->getOperand(j);
1348          if (!MO.isRegister() || MO.getReg() != VReg)
1349            continue;
1350
1351          if (MO.isDef()) {
1352            // If this restore were to be folded, it would have been folded
1353            // already.
1354            CanFold = false;
1355            break;
1356          }
1357          Ops.push_back(j);
1358        }
1359      }
1360
1361      // Fold the load into the use if possible.
1362      bool Folded = false;
1363      if (CanFold && !Ops.empty()) {
1364        if (!vrm.isReMaterialized(VReg))
1365          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1366        else {
1367          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1368          int LdSlot = 0;
1369          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1370          // If the rematerializable def is a load, also try to fold it.
1371          if (isLoadSS ||
1372              (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
1373            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1374                                          Ops, isLoadSS, LdSlot, VReg);
1375        }
1376      }
1377      // If folding is not possible / failed, then tell the spiller to issue a
1378      // load / rematerialization for us.
1379      if (Folded)
1380        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1381      else
1382        vrm.addRestorePoint(VReg, MI);
1383    }
1384    Id = RestoreMBBs.find_next(Id);
1385  }
1386
1387  // Finalize intervals: add kills, finalize spill weights, and filter out
1388  // dead intervals.
1389  std::vector<LiveInterval*> RetNewLIs;
1390  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1391    LiveInterval *LI = NewLIs[i];
1392    if (!LI->empty()) {
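      // Normalize the accumulated weight by the interval's length so the spill
      // weight reflects cost per unit of live range.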
1393      LI->weight /= LI->getSize();
1394      if (!AddedKill.count(LI)) {
1395        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1396        MachineInstr *LastUse = getInstructionFromIndex(getBaseIndex(LR->end));
1397        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1398        assert(UseIdx != -1);
1399        if (LastUse->getInstrDescriptor()->
1400            getOperandConstraint(UseIdx, TOI::TIED_TO) == -1)
1401          LastUse->getOperand(UseIdx).setIsKill();
1402      }
1403      RetNewLIs.push_back(LI);
1404    }
1405  }
1406
1407  return RetNewLIs;
1408}
1409