LiveIntervalAnalysis.cpp revision c8d044e4f779fdcfc5e7d592927740fd8f672a70
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
namespace {
  // Hidden options for help debugging.
  // Completely disables rematerialization of values during spilling.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  // Allow live intervals to be split at basic block boundaries.
  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  // Presumably an upper bound on the number of interval splits performed;
  // -1 means no limit.  (Not referenced in this chunk — confirm in the
  // splitting code.)
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}
49
// Statistics gathered by this pass (see llvm/ADT/Statistic.h).
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification and registration under the name "liveintervals".
char LiveIntervals::ID = 0;
namespace {
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}
59
/// getAnalysisUsage - Declare the analyses this pass requires and those it
/// preserves so the pass manager can schedule it correctly.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // Live variable information is consumed and kept up to date.
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  // Loop and dominator information is not invalidated by this pass.
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  // PHI elimination and two-address lowering must run before interval
  // construction (the code below relies on their effects, e.g.
  // isRegReDefinedByTwoAddr handling).
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
76  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
82namespace llvm {
83  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
84    return V < IM.first;
85  }
86
87  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
88    return IM.first < V;
89  }
90
91  struct Idx2MBBCompare {
92    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
93      return LHS.first < RHS.first;
94    }
95  };
96}
97
/// runOnMachineFunction - Compute live intervals for the function.
/// (Despite the original comment, this pass does not register allocate;
/// it builds the analysis consumed by the register allocator.)
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache target hooks and sibling analyses.
  mf_ = &fn;
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  // Assign each instruction an index, spacing them InstrSlots::NUM apart so
  // sub-positions within an instruction can be distinguished (see
  // getDefIndex / getUseIndex / getBaseIndex used below).
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Sort by start index so Idx2MBBMap can be binary searched (see
  // findLiveInMBBs / intervalIsInOneMBB).
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  // NOTE(review): counted immediately after the count above, so the two
  // statistics are identical here; coalescing presumably happens in a
  // separate pass — confirm.
  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
145
146/// print - Implement the dump method.
147void LiveIntervals::print(std::ostream &O, const Module* ) const {
148  O << "********** INTERVALS **********\n";
149  for (const_iterator I = begin(), E = end(); I != E; ++I) {
150    I->second.print(DOUT, tri_);
151    DOUT << "\n";
152  }
153
154  O << "********** MACHINEINSTRS **********\n";
155  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
156       mbbi != mbbe; ++mbbi) {
157    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
158    for (MachineBasicBlock::iterator mii = mbbi->begin(),
159           mie = mbbi->end(); mii != mie; ++mii) {
160      O << getInstructionIndex(mii) << '\t' << *mii;
161    }
162  }
163}
164
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction index covered by the interval's ranges.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // A copy to or from the interval's own register is not a conflict.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual register operands to their assigned physical
        // register (if any) so the overlap test below is meaningful.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // NOTE(review): despite the function name, register *uses* are not
        // filtered out here — any overlapping operand reports a conflict.
        // Confirm this is intended.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
204
205void LiveIntervals::printRegName(unsigned reg) const {
206  if (TargetRegisterInfo::isPhysicalRegister(reg))
207    cerr << tri_->getName(reg);
208  else
209    cerr << "%reg" << reg;
210}
211
/// handleVirtualRegisterDef - Update 'interval' to account for the
/// definition of the virtual register by instruction 'mi' (at index MIIdx)
/// in block 'mbb'.  LiveVariables' kill / alive-block information drives
/// the construction of the live ranges and value numbers.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record copy-like defs (moves and EXTRACT_SUBREG) in the value number
    // so the copy source can be recovered later (see getVNInfoSourceReg).
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Killed by the defining instruction itself: give it a one-slot
        // range at the def.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        // (def index ~0 marks a value with no defining instruction.)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // Same copy-recording logic as the first-def case above.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
400
/// handlePhysicalRegisterDef - Extend or create the interval for a physical
/// register defined at index MIIdx.  CopyMI, if non-null, is the copy-like
/// instruction producing the value and is recorded in the value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  // Scan forward through the block for the kill or a redefinition.
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing
  // instruction (or one where we know it's dead) is if it is live-in to
  // the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
461
462void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
463                                      MachineBasicBlock::iterator MI,
464                                      unsigned MIIdx,
465                                      unsigned reg) {
466  if (TargetRegisterInfo::isVirtualRegister(reg))
467    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
468  else if (allocatableRegs_[reg]) {
469    MachineInstr *CopyMI = NULL;
470    unsigned SrcReg, DstReg;
471    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
472        tii_->isMoveInstr(*MI, SrcReg, DstReg))
473      CopyMI = MI;
474    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
475    // Def of a register also defines its sub-registers.
476    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
477      // Avoid processing some defs more than once.
478      if (!MI->findRegisterDefOperand(*AS))
479        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
480  }
481}
482
/// handleLiveInRegister - Create/extend the interval for a physical register
/// that is live into MBB.  MIIdx is the index of the start of the block.
/// isAlias is true when the register is processed only because it is a
/// sub-register of another live-in register.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }
  // Falling out of the loop also reaches 'exit' with end still == MIIdx.

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // An unused aliased live-in is treated as dead at the block start.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Otherwise assume it is live through the whole block (baseIndex is
      // now one slot past the last instruction).
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
530
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Sub-registers without an interval of their own are processed with
      // isAlias = true (unused ones are then treated as dead, see
      // handleLiveInRegister).
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // NOTE(review): operands are deliberately visited in reverse order —
      // confirm whether any downstream logic depends on this.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}
574
575bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
576                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
577  std::vector<IdxMBBPair>::const_iterator I =
578    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
579
580  bool ResVal = false;
581  while (I != Idx2MBBMap.end()) {
582    if (LR.end <= I->first)
583      break;
584    MBBs.push_back(I->second);
585    ResVal = true;
586    ++I;
587  }
588  return ResVal;
589}
590
591
592LiveInterval LiveIntervals::createInterval(unsigned reg) {
593  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
594                       HUGE_VALF : 0.0F;
595  return LiveInterval(reg, Weight);
596}
597
598/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
599/// copy field and returns the source register that defines it.
600unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
601  if (!VNI->copy)
602    return 0;
603
604  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
605    return VNI->copy->getOperand(1).getReg();
606  unsigned SrcReg, DstReg;
607  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
608    return SrcReg;
609  assert(0 && "Unrecognized copy instruction!");
610  return 0;
611}
612
613//===----------------------------------------------------------------------===//
614// Register allocator hooks.
615//
616
617/// isReMaterializable - Returns true if the definition MI of the specified
618/// val# of the specified interval is re-materializable.
619bool LiveIntervals::isReMaterializable(const LiveInterval &li,
620                                       const VNInfo *ValNo, MachineInstr *MI,
621                                       bool &isLoad) {
622  if (DisableReMat)
623    return false;
624
625  isLoad = false;
626  const TargetInstrDesc &TID = MI->getDesc();
627  if (TID.isImplicitDef() || tii_->isTriviallyReMaterializable(MI)) {
628    isLoad = TID.isSimpleLoad();
629    return true;
630  }
631
632  int FrameIdx = 0;
633  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
634      !mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
635    return false;
636
637  // This is a load from fixed stack slot. It can be rematerialized unless it's
638  // re-defined by a two-address instruction.
639  isLoad = true;
640  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
641       i != e; ++i) {
642    const VNInfo *VNI = *i;
643    if (VNI == ValNo)
644      continue;
645    unsigned DefIdx = VNI->def;
646    if (DefIdx == ~1U)
647      continue; // Dead val#.
648    MachineInstr *DefMI = (DefIdx == ~0u)
649      ? NULL : getInstructionFromIndex(DefIdx);
650    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
651      isLoad = false;
652      return false;
653    }
654  }
655  return true;
656}
657
658/// isReMaterializable - Returns true if every definition of MI of every
659/// val# of the specified interval is re-materializable.
660bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
661  isLoad = false;
662  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
663       i != e; ++i) {
664    const VNInfo *VNI = *i;
665    unsigned DefIdx = VNI->def;
666    if (DefIdx == ~1U)
667      continue; // Dead val#.
668    // Is the def for the val# rematerializable?
669    if (DefIdx == ~0u)
670      return false;
671    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
672    bool DefIsLoad = false;
673    if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
674      return false;
675    isLoad |= DefIsLoad;
676  }
677  return true;
678}
679
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the operands listed in Ops
/// of the specified MI. If it is successful, MI is updated with the newly
/// created MI and returns true.
///
/// isSS selects folding the stack slot 'Slot' versus folding the
/// rematerialized definition DefMI.  Reg is the virtual register being
/// spilled; InstrIdx is MI's index in the instruction numbering.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // Accumulates whether the folded operands read and/or write Reg.
  unsigned MRInfo = 0;
  const TargetInstrDesc &TID = MI->getDesc();
  // If it is an implicit def instruction, just delete it.
  if (TID.isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    // FIXME: fold subreg use.
    if (MI->getOperand(OpIdx).getSubReg())
      return false;
    if (MI->getOperand(OpIdx).isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s): they are tied to a def, so
      // the whole access is a mod-ref.
      if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  // Ask the target to produce a folded instruction, from either the stack
  // slot or the rematerialized def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    // Re-point the instruction <-> index maps at the folded instruction,
    // then splice it into the block in place of MI.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
742
743/// canFoldMemoryOperand - Returns true if the specified load / store
744/// folding is possible.
745bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
746                                         SmallVector<unsigned, 2> &Ops) const {
747  SmallVector<unsigned, 2> FoldOps;
748  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
749    unsigned OpIdx = Ops[i];
750    // FIXME: fold subreg use.
751    if (MI->getOperand(OpIdx).getSubReg())
752      return false;
753    FoldOps.push_back(OpIdx);
754  }
755
756  return tii_->canFoldMemoryOperand(MI, FoldOps);
757}
758
759bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
760  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
761  for (LiveInterval::Ranges::const_iterator
762         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
763    std::vector<IdxMBBPair>::const_iterator II =
764      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
765    if (II == Idx2MBBMap.end())
766      continue;
767    if (I->end > II->first)  // crossing a MBB.
768      return false;
769    MBBs.insert(II->second);
770    if (MBBs.size() > 1)
771      return false;
772  }
773  return true;
774}
775
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// rewriteInstructionForSpills rewrites all operands of MI that refer to
/// li.reg so they use NewVReg instead (creating NewVReg on demand), adds the
/// corresponding use / def live ranges to NewVReg's spill interval, and
/// attempts to fold the associated load / store into MI when not splitting.
///
/// Out parameters:
///   NewVReg - the replacement virtual register; created here if passed in
///             as 0, otherwise reused (extends a split interval).
///   HasDef / HasUse - whether MI defines / uses li.reg after rewriting.
///
/// Returns true if MI's memory access can be folded; the caller uses this to
/// give fully-foldable split intervals a lower spill weight. Note the 'end'
/// and 'loopInfo' parameters are not referenced in this function's body.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
                 const MachineLoopInfo *loopInfo,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;  // Cache the original reg for the operand-grouping scan below.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        // Same source register: group it so it gets the same NewVReg.
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        // Splitting: defer the actual fold, just record whether it's possible.
        CanFold = canFoldMemoryOperand(MI, Ops);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = RegInfo.createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
      MI->getOperand(Ops[j]).setReg(NewVReg);

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        // Record rematerialization info so the spiller re-emits the def
        // instead of reloading from the stack.
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the reload range covers [load, use] slots of this
        // instruction index with a brand new value number.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def range covers [def, store] slots of this instruction index.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
954bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
955                                   const VNInfo *VNI,
956                                   MachineBasicBlock *MBB, unsigned Idx) const {
957  unsigned End = getMBBEndIdx(MBB);
958  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
959    unsigned KillIdx = VNI->kills[j];
960    if (KillIdx > Idx && KillIdx < End)
961      return true;
962  }
963  return false;
964}
965
966static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
967  const VNInfo *VNI = NULL;
968  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
969         e = li.vni_end(); i != e; ++i)
970    if ((*i)->def == DefIdx) {
971      VNI = *i;
972      break;
973    }
974  return VNI;
975}
976
/// rewriteInstructionsForSpills - Rewrite every instruction within the live
/// range [I->start, I->end) via rewriteInstructionForSpills. When splitting
/// (TrySplit), one replacement vreg is reused per basic block (tracked in
/// MBBVRegsMap), and the last def / first use in each block are recorded in
/// SpillIdxes / RestoreIdxes (with SpillMBBs / RestoreMBBs as block index
/// masks) so addIntervalsForSpills can place the actual stores and loads
/// afterwards. Intervals whose defs / uses can all be folded are given a
/// reduced spill weight.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned index = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
  // Walk the range one instruction slot at a time.
  for (; index != end; index += InstrSlots::NUM) {
    // skip deleted instructions
    while (index != end && !getInstructionFromIndex(index))
      index += InstrSlots::NUM;
    if (index == end) break;

    MachineInstr *MI = getInstructionFromIndex(index);
    MachineBasicBlock *MBB = MI->getParent();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // See if this MBB already has a replacement vreg we should reuse.
      std::map<unsigned,unsigned>::const_iterator NVI =
        MBBVRegsMap.find(MBB->getNumber());
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
        bool MIHasUse = false;
        bool MIHasDef = false;
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& mop = MI->getOperand(i);
          if (!mop.isRegister() || mop.getReg() != li.reg)
            continue;
          if (mop.isUse())
            MIHasUse = true;
          else
            MIHasDef = true;
        }
        if (MIHasDef && !MIHasUse) {
          // Pure def: drop the per-MBB vreg so a fresh interval is started.
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    unsigned MBBId = MBB->getNumber();
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value is live past this def within the MBB: record (or update)
          // the spill point for this block.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1153
1154bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1155                        BitVector &RestoreMBBs,
1156                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1157  if (!RestoreMBBs[Id])
1158    return false;
1159  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1160  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1161    if (Restores[i].index == index &&
1162        Restores[i].vreg == vr &&
1163        Restores[i].canFold)
1164      return true;
1165  return false;
1166}
1167
1168void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1169                        BitVector &RestoreMBBs,
1170                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1171  if (!RestoreMBBs[Id])
1172    return;
1173  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1174  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1175    if (Restores[i].index == index && Restores[i].vreg)
1176      Restores[i].index = -1;
1177}
1178
1179
/// addIntervalsForSpills - Spill (or split) the given live interval. Each
/// rewritten use / def gets a new virtual register (one per MBB when
/// splitting); spill store and restore load points are handed to the
/// VirtRegMap; the non-empty new intervals are returned for the register
/// allocator to process.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit of SpillMBBs / RestoreMBBs specifies whether a spill / restore
  // is required in the corresponding MBB; SpillIdxes / RestoreIdxes carry
  // the per-MBB instruction indices and foldability flags.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  MachineRegisterInfo &RegInfo = mf_->getRegInfo();
  const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);

  // Per-val# rematerialization state.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    return NewLIs;
  }

  // Only attempt splitting for intervals that span more than one basic
  // block, and honor the -split-limit debugging cap.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // Analyze each value number: can its def be rematerialized, and may the
  // original def be deleted once all uses are rewritten?
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit)
    return NewLIs;

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Place the spill stores recorded by rewriteInstructionsForSpills,
    // folding them into the defining instruction when possible.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse > 0) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Else tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Place the restore loads, folding them into the using instruction when
  // possible.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;  // Invalidated by eraseRestoreInfo above.
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the spill weight by the interval's size.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        // Mark the last use as a kill unless it is tied to a def
        // (two-address), in which case the value lives past the use.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
        assert(UseIdx != -1);
        if (LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) ==
            -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  return RetNewLIs;
}
1473