LiveIntervalAnalysis.cpp revision 249ded3fa8884f91fded869fb6e251b8aebb0376
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
namespace {
  // Hidden command-line options to help debugging the pass.
  // Disable rematerialization of spilled values entirely.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  // Allow live intervals to be split at basic block boundaries.
  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  // Cap on the number of interval splits (-1 presumably means
  // unlimited -- confirm against the splitting code).
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}

// Statistics reported under -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification and registration.
char LiveIntervals::ID = 0;
namespace {
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}
59
/// getAnalysisUsage - Declare what this pass requires and preserves.
/// LiveVariables must run first; PHI elimination and two-address lowering
/// must already have transformed the function.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  // This analysis invalidates neither loop info, dominators, nor the
  // results of PHI elimination.
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
76  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
/// runOnMachineFunction - Number the instructions and basic blocks of the
/// function, then compute live intervals for all virtual and allocatable
/// physical registers.  (Despite the legacy wording, no register
/// allocation happens here.)
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function/target state the rest of the pass needs.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Give each instruction an index, InstrSlots::NUM apart, so that
    // def/use sub-slots fit between consecutive instructions.
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Sort by start index so findLiveInMBBs can binary search it.
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  // NOTE(review): nothing runs between these two counters here, so
  // numIntervalsAfter mirrors numIntervals at this point -- confirm intent.
  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
130
131/// print - Implement the dump method.
132void LiveIntervals::print(std::ostream &O, const Module* ) const {
133  O << "********** INTERVALS **********\n";
134  for (const_iterator I = begin(), E = end(); I != E; ++I) {
135    I->second.print(DOUT, tri_);
136    DOUT << "\n";
137  }
138
139  O << "********** MACHINEINSTRS **********\n";
140  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
141       mbbi != mbbe; ++mbbi) {
142    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
143    for (MachineBasicBlock::iterator mii = mbbi->begin(),
144           mie = mbbi->end(); mii != mie; ++mii) {
145      O << getInstructionIndex(mii) << '\t' << *mii;
146    }
147  }
148}
149
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by the interval's ranges.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // A copy to or from the interval's own register is not a conflict.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual-register operands through the VirtRegMap to the
        // physical register they were assigned, if any.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Any operand overlapping 'reg' (including aliases) conflicts.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
189
190void LiveIntervals::printRegName(unsigned reg) const {
191  if (TargetRegisterInfo::isPhysicalRegister(reg))
192    cerr << tri_->getName(reg);
193  else
194    cerr << "%reg" << reg;
195}
196
/// handleVirtualRegisterDef - Update 'interval' for a def of a virtual
/// register by instruction 'mi' (at index MIIdx) in block 'mbb'.  The first
/// def of a vreg builds the whole interval from LiveVariables kill/alive
/// info; later defs arise only from two-address or PHI elimination and are
/// patched into the existing interval.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record the defining copy (if any) so coalescing can use it later.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Def and kill are the same instruction: a dead def gets a
        // one-slot range.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
385
/// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
/// register at index MIIdx by instruction 'mi'.  CopyMI, if non-null, is
/// the copy instruction that produced the value.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    // NOTE(review): 'start' is already a def slot; applying getDefIndex
    // again looks like a no-op -- confirm against InstrSlots layout.
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
446
447void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
448                                      MachineBasicBlock::iterator MI,
449                                      unsigned MIIdx,
450                                      unsigned reg) {
451  if (TargetRegisterInfo::isVirtualRegister(reg))
452    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
453  else if (allocatableRegs_[reg]) {
454    MachineInstr *CopyMI = NULL;
455    unsigned SrcReg, DstReg;
456    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457        tii_->isMoveInstr(*MI, SrcReg, DstReg))
458      CopyMI = MI;
459    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
460    // Def of a register also defines its sub-registers.
461    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
462      // Avoid processing some defs more than once.
463      if (!MI->findRegisterDefOperand(*AS))
464        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
465  }
466}
467
/// handleLiveInRegister - Build a range for a register that is live into
/// MBB, starting at index MIIdx.  isAlias is true when this register is
/// only live in because it aliases another live-in register.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // An unused alias live-in is treated as dead right at block entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Otherwise assume it lives through the whole block; baseIndex has
      // advanced past the last instruction by this point.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
515
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.  Instruction indexes here must agree with the
/// numbering assigned in runOnMachineFunction.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register; only create alias
      // intervals for sub-registers not already seen.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}
559
560bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
561                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
562  std::vector<IdxMBBPair>::const_iterator I =
563    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
564
565  bool ResVal = false;
566  while (I != Idx2MBBMap.end()) {
567    if (LR.end <= I->first)
568      break;
569    MBBs.push_back(I->second);
570    ResVal = true;
571    ++I;
572  }
573  return ResVal;
574}
575
576
577LiveInterval LiveIntervals::createInterval(unsigned reg) {
578  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
579                       HUGE_VALF : 0.0F;
580  return LiveInterval(reg, Weight);
581}
582
583/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
584/// copy field and returns the source register that defines it.
585unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
586  if (!VNI->copy)
587    return 0;
588
589  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
590    return VNI->copy->getOperand(1).getReg();
591  unsigned SrcReg, DstReg;
592  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
593    return SrcReg;
594  assert(0 && "Unrecognized copy instruction!");
595  return 0;
596}
597
598//===----------------------------------------------------------------------===//
599// Register allocator hooks.
600//
601
602/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
603/// allow one) virtual register operand, then its uses are implicitly using
604/// the register. Returns the virtual register.
605unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
606                                            MachineInstr *MI) const {
607  unsigned RegOp = 0;
608  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
609    MachineOperand &MO = MI->getOperand(i);
610    if (!MO.isRegister() || !MO.isUse())
611      continue;
612    unsigned Reg = MO.getReg();
613    if (Reg == 0 || Reg == li.reg)
614      continue;
615    // FIXME: For now, only remat MI with at most one register operand.
616    assert(!RegOp &&
617           "Can't rematerialize instruction with multiple register operand!");
618    RegOp = MO.getReg();
619    break;
620  }
621  return RegOp;
622}
623
624/// isValNoAvailableAt - Return true if the val# of the specified interval
625/// which reaches the given instruction also reaches the specified use index.
626bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
627                                       unsigned UseIdx) const {
628  unsigned Index = getInstructionIndex(MI);
629  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
630  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
631  return UI != li.end() && UI->valno == ValNo;
632}
633
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  Sets isLoad when
/// the remat candidate is a simple load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  const TargetInstrDesc &TID = MI->getDesc();
  // IMPLICIT_DEF defines nothing real; always safe to rematerialize.
  if (TID.isImplicitDef())
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    isLoad = TID.isSimpleLoad();

    // If the def reads another virtual register, that register must still
    // hold the same value at every use that would re-execute the def.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this val# matter.
        // NOTE(review): FindLiveRangeContaining may not cover UseIdx here;
        // confirm a use outside the interval cannot occur at this point.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
674
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.  isLoad is set if
/// any rematerializable def is a load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    // ~1U is the sentinel for a dead val# -- nothing to rematerialize.
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?  ~0U means the defining
    // instruction is unknown, so we must conservatively refuse.
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
697
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  unsigned MRInfo = 0;
  const TargetInstrDesc &TID = MI->getDesc();
  // If it is an implicit def instruction, just delete it.
  if (TID.isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Classify each candidate operand as mod and/or ref and collect the
  // indices the target folder should consider.
  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return false;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  // Can't fold a load from fixed stack slot into a two address instruction.
  if (isSS && DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot or the remat'ed def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    // Rewire the index maps so fmi takes over MI's slot number.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
766
767/// canFoldMemoryOperand - Returns true if the specified load / store
768/// folding is possible.
769bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
770                                         SmallVector<unsigned, 2> &Ops) const {
771  SmallVector<unsigned, 2> FoldOps;
772  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
773    unsigned OpIdx = Ops[i];
774    // FIXME: fold subreg use.
775    if (MI->getOperand(OpIdx).getSubReg())
776      return false;
777    FoldOps.push_back(OpIdx);
778  }
779
780  return tii_->canFoldMemoryOperand(MI, FoldOps);
781}
782
783bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, unsigned Reg) const {
784  SmallVector<unsigned, 2> FoldOps;
785  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
786    MachineOperand& mop = MI->getOperand(i);
787    if (!mop.isRegister())
788      continue;
789    unsigned UseReg = mop.getReg();
790    if (UseReg != Reg)
791      continue;
792    // FIXME: fold subreg use.
793    if (mop.getSubReg())
794      return false;
795    FoldOps.push_back(i);
796  }
797  return tii_->canFoldMemoryOperand(MI, FoldOps);
798}
799
800bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
801  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
802  for (LiveInterval::Ranges::const_iterator
803         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
804    std::vector<IdxMBBPair>::const_iterator II =
805      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
806    if (II == Idx2MBBMap.end())
807      continue;
808    if (I->end > II->first)  // crossing a MBB.
809      return false;
810    MBBs.insert(II->second);
811    if (MBBs.size() > 1)
812      return false;
813  }
814  return true;
815}
816
817/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
818/// interval on to-be re-materialized operands of MI) with new register.
819void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
820                                       MachineInstr *MI, unsigned NewVReg,
821                                       VirtRegMap &vrm) {
822  // There is an implicit use. That means one of the other operand is
823  // being remat'ed and the remat'ed instruction has li.reg as an
824  // use operand. Make sure we rewrite that as well.
825  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
826    MachineOperand &MO = MI->getOperand(i);
827    if (!MO.isRegister())
828      continue;
829    unsigned Reg = MO.getReg();
830    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
831      continue;
832    if (!vrm.isReMaterialized(Reg))
833      continue;
834    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
835    int OpIdx = ReMatMI->findRegisterUseOperandIdx(li.reg);
836    if (OpIdx != -1)
837      ReMatMI->getOperand(OpIdx).setReg(NewVReg);
838  }
839}
840
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// This routine rewrites the operands of MI that refer to li.reg to use
/// NewVReg (creating a fresh virtual register when NewVReg is 0), attempts
/// to fold loads / stores into MI where allowed, records spill / re-mat
/// bookkeeping in vrm, and extends or creates the live interval for the
/// replacement register.  HasDef / HasUse report whether a def / use was
/// rewritten.  The return value is the CanFold result for the operand
/// group processed (true when folding was deemed possible).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  // Folding may rewrite MI in place; when that happens we jump back here
  // and rescan the (possibly completely different) operand list.
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    // Only virtual-register operands of li.reg itself are rewritten.
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        unsigned ImpUse = getReMatImplicitUse(li, MI);
        if (ImpUse) {
          // To be deleted MI has a virtual register operand, update the
          // spill weight of the register interval.
          unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
          LiveInterval &ImpLi = getInterval(ImpUse);
          ImpLi.weight -=
            getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
        }
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    // Ops collects the index of every operand of MI referring to li.reg,
    // starting with operand i found above.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        // Splitting: only record whether a fold would be possible; the
        // actual fold is deferred.
        CanFold = canFoldMemoryOperand(MI, Ops);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the use range spans load index to use index.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // Def range spans the def index to the store index.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1039bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1040                                   const VNInfo *VNI,
1041                                   MachineBasicBlock *MBB, unsigned Idx) const {
1042  unsigned End = getMBBEndIdx(MBB);
1043  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1044    unsigned KillIdx = VNI->kills[j];
1045    if (KillIdx > Idx && KillIdx < End)
1046      return true;
1047  }
1048  return false;
1049}
1050
1051static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1052  const VNInfo *VNI = NULL;
1053  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1054         e = li.vni_end(); i != e; ++i)
1055    if ((*i)->def == DefIdx) {
1056      VNI = *i;
1057      break;
1058    }
1059  return VNI;
1060}
1061
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.  One record is created per operand occurrence of the
/// spilled register, so a single instruction may yield several records.
struct RewriteInfo {
  unsigned Index;      // Instruction index of MI.
  MachineInstr *MI;    // The instruction containing the operand.
  bool HasUse;         // This operand occurrence reads the register.
  bool HasDef;         // This operand occurrence writes the register.
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
};
1072
/// RewriteInfoCompare - Orders RewriteInfo records by ascending
/// instruction index (used to sort the rewrite worklist).
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
  }
};
1078
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg that
/// falls inside the live range I.  Collects the affected instructions,
/// rewrites them via rewriteInstructionForSpills, and records per-MBB
/// spill (SpillMBBs/SpillIdxes) and restore (RestoreMBBs/RestoreIdxes)
/// points for the splitter.  Intervals whose def / use can all be folded
/// get their spill weight reduced.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down / up to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &(*ri);
    MachineOperand &O = ri.getOperand();
    // Advance before the operand is rewritten / the instr possibly erased.
    ++ri;
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Merge them all into one pass over MI,
    // counting how many of them are uses.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight.
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight +=
        getSpillWeight(false, true, loopDepth) * NumUses / ImpLi.getSize();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the replacement vreg already chosen for this
      // basic block, if any.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing was rewritten (e.g. the re-mat def itself was erased).
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Record (or update) the spill point for this block.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1281
1282bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1283                        BitVector &RestoreMBBs,
1284                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1285  if (!RestoreMBBs[Id])
1286    return false;
1287  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1288  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1289    if (Restores[i].index == index &&
1290        Restores[i].vreg == vr &&
1291        Restores[i].canFold)
1292      return true;
1293  return false;
1294}
1295
1296void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1297                        BitVector &RestoreMBBs,
1298                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1299  if (!RestoreMBBs[Id])
1300    return;
1301  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1302  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1303    if (Restores[i].index == index && Restores[i].vreg)
1304      Restores[i].index = -1;
1305}
1306
1307
/// addIntervalsForSpills - Spill (or split, when profitable) the given live
/// interval: decide per-val# re-materializability, assign a stack slot when
/// needed, rewrite all defs / uses via rewriteInstructionsForSpills, then
/// insert / fold spill and restore points and finalize kill markers and
/// spill weights.  Returns the list of non-empty new intervals created.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specify whether it a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# re-materialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    return NewLIs;
  }

  // Only consider splitting if the interval spans multiple basic blocks
  // and the split count limit (if any) has not been reached.
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit)
    return NewLIs;

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Process each basic block that needs a spill.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse > 0) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Else tell the spiller to issue a spill.
        if (!Folded) {
          // The spilled value is killed here if its last range ends at the
          // store index of this instruction.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Process each basic block that needs a restore.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      // index == -1 marks a restore entry invalidated by eraseRestoreInfo.
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight.
            unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight +=
              getSpillWeight(false, true, loopDepth) / ImpLi.getSize();

            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the accumulated weight by the interval's size.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        // Mark the last use of this interval as a kill, unless it is a
        // tied (two-address) operand.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  return RetNewLIs;
}
1612