// LiveIntervalAnalysis.cpp revision 298bbe82cb390235f7b8ab4bd550feff909e0c3d
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
namespace {
  // Hidden options for help debugging.
  // Globally disable rematerialization of trivially recomputable values.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  // Allow live intervals to be split at basic block boundaries.
  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  // Cap on the number of interval splits; -1 presumably means unlimited —
  // TODO confirm against the code that reads SplitLimit.
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}
49
// Pass statistics, reported when the compiler is run with -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Unique address used by the pass manager to identify this pass.
char LiveIntervals::ID = 0;
namespace {
  // Register the pass under the name "liveintervals".
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}
59
/// getAnalysisUsage - Declare the passes this analysis requires to have run
/// before it (LiveVariables, PHI elimination, two-address lowering) and the
/// analyses it preserves, so the pass manager can schedule and cache them.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  // PHI elimination must run first so no PHI nodes remain; this pass does
  // not invalidate its results.
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
76  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
/// runOnMachineFunction - Register allocate the whole function
///
/// Numbers every instruction in the function (each instruction gets
/// InstrSlots::NUM index slots), records the [start,end] index range of each
/// basic block, then computes the live intervals.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, register info, and target hooks used throughout.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinal.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Assign each instruction an index and record both directions of the
    // mapping; indices advance by InstrSlots::NUM per instruction.
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Sort by start index so Idx2MBBMap can be binary-searched later.
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  // Always reports a change since the instruction numbering is new state.
  return true;
}
130
131/// print - Implement the dump method.
132void LiveIntervals::print(std::ostream &O, const Module* ) const {
133  O << "********** INTERVALS **********\n";
134  for (const_iterator I = begin(), E = end(); I != E; ++I) {
135    I->second.print(DOUT, tri_);
136    DOUT << "\n";
137  }
138
139  O << "********** MACHINEINSTRS **********\n";
140  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
141       mbbi != mbbe; ++mbbi) {
142    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
143    for (MachineBasicBlock::iterator mii = mbbi->begin(),
144           mie = mbbi->end(); mii != mie; ++mii) {
145      O << getInstructionIndex(mii) << '\t' << *mii;
146    }
147  }
148}
149
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction index covered by each range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // Copies to/from li.reg itself do not count as conflicts.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual operands through the VirtRegMap to the physreg they
        // were assigned; unassigned vregs cannot conflict.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Any operand overlapping 'reg' (including aliases) is a conflict.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
189
190void LiveIntervals::printRegName(unsigned reg) const {
191  if (TargetRegisterInfo::isPhysicalRegister(reg))
192    cerr << tri_->getName(reg);
193  else
194    cerr << "%reg" << reg;
195}
196
/// handleVirtualRegisterDef - Extend 'interval' for the definition of a
/// virtual register at instruction 'mi' (index MIIdx).  The first definition
/// builds the interval from LiveVariables kill/alive-block info; later
/// definitions are the result of two-address or PHI elimination and patch
/// the existing interval with new value numbers.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Remember copy-like defs so coalescing can trace the value's source.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Defined and killed by the same instruction: a unit-length range.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      // The range reaching the redef (at RedefIndex-1) holds the old value.
      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // As above, record copy-like defining instructions for coalescing.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
385
/// handlePhysicalRegisterDef - Add a live range for a physical register
/// defined at 'mi'.  Physreg ranges never cross block boundaries, so this
/// scans forward in the block for the kill or redefinition that ends the
/// range.  CopyMI, if non-null, is the copy-like instruction defining it.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  // Reuse the value number of an overlapping range so repeated defs of the
  // same physreg (e.g. sub-register defs) share one val#.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
446
447void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
448                                      MachineBasicBlock::iterator MI,
449                                      unsigned MIIdx,
450                                      unsigned reg) {
451  if (TargetRegisterInfo::isVirtualRegister(reg))
452    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
453  else if (allocatableRegs_[reg]) {
454    MachineInstr *CopyMI = NULL;
455    unsigned SrcReg, DstReg;
456    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457        tii_->isMoveInstr(*MI, SrcReg, DstReg))
458      CopyMI = MI;
459    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
460    // Def of a register also defines its sub-registers.
461    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
462      // Avoid processing some defs more than once.
463      if (!MI->findRegisterDefOperand(*AS))
464        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
465  }
466}
467
/// handleLiveInRegister - Add a live range for a register that is live into
/// MBB (starting at index MIIdx) with no visible def.  isAlias is true when
/// the register is only live-in as a sub-register alias of another live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  // (end == MIIdx means the scan found neither a kill nor a redef.)
  if (end == MIIdx) {
    if (isAlias) {
      // An unused alias is treated as dead right at the block entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Otherwise it lives through the whole block.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
515
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Only create alias intervals for sub-registers not already covered.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // Iterate operands in reverse so implicit defs are seen first; each
      // register def starts (or extends) an interval.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}
559
560bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
561                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
562  std::vector<IdxMBBPair>::const_iterator I =
563    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
564
565  bool ResVal = false;
566  while (I != Idx2MBBMap.end()) {
567    if (LR.end <= I->first)
568      break;
569    MBBs.push_back(I->second);
570    ResVal = true;
571    ++I;
572  }
573  return ResVal;
574}
575
576
577LiveInterval LiveIntervals::createInterval(unsigned reg) {
578  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
579                       HUGE_VALF : 0.0F;
580  return LiveInterval(reg, Weight);
581}
582
583/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
584/// copy field and returns the source register that defines it.
585unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
586  if (!VNI->copy)
587    return 0;
588
589  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
590    return VNI->copy->getOperand(1).getReg();
591  unsigned SrcReg, DstReg;
592  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
593    return SrcReg;
594  assert(0 && "Unrecognized copy instruction!");
595  return 0;
596}
597
598//===----------------------------------------------------------------------===//
599// Register allocator hooks.
600//
601
602/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
603/// allow one) virtual register operand, then its uses are implicitly using
604/// the register. Returns the virtual register.
605unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
606                                            MachineInstr *MI) const {
607  unsigned RegOp = 0;
608  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
609    MachineOperand &MO = MI->getOperand(i);
610    if (!MO.isRegister() || !MO.isUse())
611      continue;
612    unsigned Reg = MO.getReg();
613    if (Reg == 0 || Reg == li.reg)
614      continue;
615    // FIXME: For now, only remat MI with at most one register operand.
616    assert(!RegOp &&
617           "Can't rematerialize instruction with multiple register operand!");
618    RegOp = MO.getReg();
619    break;
620  }
621  return RegOp;
622}
623
624/// isValNoAvailableAt - Return true if the val# of the specified interval
625/// which reaches the given instruction also reaches the specified use index.
626bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
627                                       unsigned UseIdx) const {
628  unsigned Index = getInstructionIndex(MI);
629  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
630  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
631  return UI != li.end() && UI->valno == ValNo;
632}
633
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// Sets isLoad when the definition is a load, so callers can weigh spill
/// costs accordingly.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Honor the -disable-rematerialization flag.
  if (DisableReMat)
    return false;

  isLoad = false;
  const TargetInstrDesc &TID = MI->getDesc();
  // IMPLICIT_DEF produces no real value; always safe to rematerialize.
  if (TID.isImplicitDef())
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx)) {
    // This is a load from fixed stack slot. It can be rematerialized unless
    // it's re-defined by a two-address instruction.
    isLoad = true;
    for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
         i != e; ++i) {
      const VNInfo *VNI = *i;
      if (VNI == ValNo)
        continue;
      unsigned DefIdx = VNI->def;
      if (DefIdx == ~1U)
        continue; // Dead val#.
      // ~0u marks a def with no known instruction (e.g. live-in).
      MachineInstr *DefMI = (DefIdx == ~0u)
        ? NULL : getInstructionFromIndex(DefIdx);
      if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
        isLoad = false;
        return false;
      }
    }
    return true;
  }

  if (tii_->isTriviallyReMaterializable(MI)) {
    isLoad = TID.isSimpleLoad();

    // If the remat'ed instruction implicitly uses another virtual register,
    // that register's value must also be available at every use of li.reg
    // reached by this val#.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this val# are relevant.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
692
693/// isReMaterializable - Returns true if every definition of MI of every
694/// val# of the specified interval is re-materializable.
695bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
696  isLoad = false;
697  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
698       i != e; ++i) {
699    const VNInfo *VNI = *i;
700    unsigned DefIdx = VNI->def;
701    if (DefIdx == ~1U)
702      continue; // Dead val#.
703    // Is the def for the val# rematerializable?
704    if (DefIdx == ~0u)
705      return false;
706    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
707    bool DefIsLoad = false;
708    if (!ReMatDefMI ||
709        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
710      return false;
711    isLoad |= DefIsLoad;
712  }
713  return true;
714}
715
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  unsigned MRInfo = 0;
  const TargetInstrDesc &TID = MI->getDesc();
  // If it is an implicit def instruction, just delete it.
  if (TID.isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Classify each candidate operand as mod and/or ref, and collect the
  // indices the target folder should consider.
  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return false;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  // Fold either the stack slot (spill/restore) or the remat'ed def DefMI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    // Rewire the index maps from the old instruction to the folded one,
    // then splice the new instruction into the block in place of the old.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
780
781/// canFoldMemoryOperand - Returns true if the specified load / store
782/// folding is possible.
783bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
784                                         SmallVector<unsigned, 2> &Ops) const {
785  SmallVector<unsigned, 2> FoldOps;
786  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
787    unsigned OpIdx = Ops[i];
788    // FIXME: fold subreg use.
789    if (MI->getOperand(OpIdx).getSubReg())
790      return false;
791    FoldOps.push_back(OpIdx);
792  }
793
794  return tii_->canFoldMemoryOperand(MI, FoldOps);
795}
796
797bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, unsigned Reg) const {
798  SmallVector<unsigned, 2> FoldOps;
799  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
800    MachineOperand& mop = MI->getOperand(i);
801    if (!mop.isRegister())
802      continue;
803    unsigned UseReg = mop.getReg();
804    if (UseReg != Reg)
805      continue;
806    // FIXME: fold subreg use.
807    if (mop.getSubReg())
808      return false;
809    FoldOps.push_back(i);
810  }
811  return tii_->canFoldMemoryOperand(MI, FoldOps);
812}
813
814bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
815  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
816  for (LiveInterval::Ranges::const_iterator
817         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
818    std::vector<IdxMBBPair>::const_iterator II =
819      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
820    if (II == Idx2MBBMap.end())
821      continue;
822    if (I->end > II->first)  // crossing a MBB.
823      return false;
824    MBBs.insert(II->second);
825    if (MBBs.size() > 1)
826      return false;
827  }
828  return true;
829}
830
831/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
832/// interval on to-be re-materialized operands of MI) with new register.
833void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
834                                       MachineInstr *MI, unsigned NewVReg,
835                                       VirtRegMap &vrm) {
836  // There is an implicit use. That means one of the other operand is
837  // being remat'ed and the remat'ed instruction has li.reg as an
838  // use operand. Make sure we rewrite that as well.
839  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
840    MachineOperand &MO = MI->getOperand(i);
841    if (!MO.isRegister())
842      continue;
843    unsigned Reg = MO.getReg();
844    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
845      continue;
846    if (!vrm.isReMaterialized(Reg))
847      continue;
848    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
849    int OpIdx = ReMatMI->findRegisterUseOperandIdx(li.reg);
850    if (OpIdx != -1)
851      ReMatMI->getOperand(OpIdx).setReg(NewVReg);
852  }
853}
854
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites every operand of MI that references li.reg to NewVReg (creating
/// NewVReg if it is still 0), adds live ranges for the new interval, and
/// tries to fold the spill load / store into MI when not splitting.  HasDef
/// and HasUse report whether MI defines and/or uses the interval.  Returns
/// the foldability computed for the rewritten operand group; the caller uses
/// it to lower the spill weight when everything could be folded.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  // getNumOperands() is deliberately re-evaluated every iteration: folding
  // below may replace MI, and an implicit-use operand may be appended.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    // RegI keeps the original register so later operands of MI referencing
    // the same register can still be matched after mop is rewritten.
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        unsigned ImpUse = getReMatImplicitUse(li, MI);
        if (ImpUse) {
          // To be deleted MI has a virtual register operand, update the
          // spill weight of the register interval.
          unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
          LiveInterval &ImpLi = getInterval(ImpUse);
          ImpLi.weight -=
            getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
        }
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        CanFold = canFoldMemoryOperand(MI, Ops);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the use is covered from the load slot through the
        // use slot of this instruction index.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // Def live range spans from the def slot to the store slot.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1053bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1054                                   const VNInfo *VNI,
1055                                   MachineBasicBlock *MBB, unsigned Idx) const {
1056  unsigned End = getMBBEndIdx(MBB);
1057  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1058    unsigned KillIdx = VNI->kills[j];
1059    if (KillIdx > Idx && KillIdx < End)
1060      return true;
1061  }
1062  return false;
1063}
1064
1065static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1066  const VNInfo *VNI = NULL;
1067  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1068         e = li.vni_end(); i != e; ++i)
1069    if ((*i)->def == DefIdx) {
1070      VNI = *i;
1071      break;
1072    }
1073  return VNI;
1074}
1075
1076/// RewriteInfo - Keep track of machine instrs that will be rewritten
1077/// during spilling.
1078struct RewriteInfo {
1079  unsigned Index;
1080  MachineInstr *MI;
1081  bool HasUse;
1082  bool HasDef;
1083  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1084    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1085};
1086
1087struct RewriteInfoCompare {
1088  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1089    return LHS.Index < RHS.Index;
1090  }
1091};
1092
/// rewriteInstructionsForSpills - Rewrite all defs and uses of li.reg that
/// fall inside the live range [I->start, I->end), either for a plain spill
/// or for splitting at basic block boundaries (TrySplit).  Spill and restore
/// points are recorded per-MBB in SpillIdxes / RestoreIdxes (with the
/// matching bits set in SpillMBBs / RestoreMBBs) so the caller can place the
/// actual loads and stores afterwards.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range bounds to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &(*ri);
    MachineOperand &O = ri.getOperand();
    // Advance now; the rewriting below may invalidate the current position.
    ++ri;
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Merge them into one per-instruction summary.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight.
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight +=
        getSpillWeight(false, true, loopDepth) * NumUses / ImpLi.getSize();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the virtual register already created for
      // this basic block, if any.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    // Rewrite this instruction; NewVReg may be created / updated in place.
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            // First spill candidate recorded for this MBB.
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1295
1296bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1297                        BitVector &RestoreMBBs,
1298                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1299  if (!RestoreMBBs[Id])
1300    return false;
1301  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1302  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1303    if (Restores[i].index == index &&
1304        Restores[i].vreg == vr &&
1305        Restores[i].canFold)
1306      return true;
1307  return false;
1308}
1309
1310void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1311                        BitVector &RestoreMBBs,
1312                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1313  if (!RestoreMBBs[Id])
1314    return;
1315  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1316  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1317    if (Restores[i].index == index && Restores[i].vreg)
1318      Restores[i].index = -1;
1319}
1320
1321
1322std::vector<LiveInterval*> LiveIntervals::
1323addIntervalsForSpills(const LiveInterval &li,
1324                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1325  // Since this is called after the analysis is done we don't know if
1326  // LiveVariables is available
1327  lv_ = getAnalysisToUpdate<LiveVariables>();
1328
1329  assert(li.weight != HUGE_VALF &&
1330         "attempt to spill already spilled interval!");
1331
1332  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1333  li.print(DOUT, tri_);
1334  DOUT << '\n';
1335
1336  // Each bit specify whether it a spill is required in the MBB.
1337  BitVector SpillMBBs(mf_->getNumBlockIDs());
1338  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1339  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1340  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1341  std::map<unsigned,unsigned> MBBVRegsMap;
1342  std::vector<LiveInterval*> NewLIs;
1343  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1344
1345  unsigned NumValNums = li.getNumValNums();
1346  SmallVector<MachineInstr*, 4> ReMatDefs;
1347  ReMatDefs.resize(NumValNums, NULL);
1348  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1349  ReMatOrigDefs.resize(NumValNums, NULL);
1350  SmallVector<int, 4> ReMatIds;
1351  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1352  BitVector ReMatDelete(NumValNums);
1353  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1354
1355  // Spilling a split live interval. It cannot be split any further. Also,
1356  // it's also guaranteed to be a single val# / range interval.
1357  if (vrm.getPreSplitReg(li.reg)) {
1358    vrm.setIsSplitFromReg(li.reg, 0);
1359    // Unset the split kill marker on the last use.
1360    unsigned KillIdx = vrm.getKillPoint(li.reg);
1361    if (KillIdx) {
1362      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1363      assert(KillMI && "Last use disappeared?");
1364      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1365      assert(KillOp != -1 && "Last use disappeared?");
1366      KillMI->getOperand(KillOp).setIsKill(false);
1367    }
1368    vrm.removeKillPoint(li.reg);
1369    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1370    Slot = vrm.getStackSlot(li.reg);
1371    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1372    MachineInstr *ReMatDefMI = DefIsReMat ?
1373      vrm.getReMaterializedMI(li.reg) : NULL;
1374    int LdSlot = 0;
1375    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1376    bool isLoad = isLoadSS ||
1377      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1378    bool IsFirstRange = true;
1379    for (LiveInterval::Ranges::const_iterator
1380           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1381      // If this is a split live interval with multiple ranges, it means there
1382      // are two-address instructions that re-defined the value. Only the
1383      // first def can be rematerialized!
1384      if (IsFirstRange) {
1385        // Note ReMatOrigDefMI has already been deleted.
1386        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1387                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1388                             false, vrm, rc, ReMatIds, loopInfo,
1389                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1390                             MBBVRegsMap, NewLIs);
1391      } else {
1392        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1393                             Slot, 0, false, false, false,
1394                             false, vrm, rc, ReMatIds, loopInfo,
1395                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1396                             MBBVRegsMap, NewLIs);
1397      }
1398      IsFirstRange = false;
1399    }
1400    return NewLIs;
1401  }
1402
1403  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1404  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1405    TrySplit = false;
1406  if (TrySplit)
1407    ++numSplits;
1408  bool NeedStackSlot = false;
1409  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1410       i != e; ++i) {
1411    const VNInfo *VNI = *i;
1412    unsigned VN = VNI->id;
1413    unsigned DefIdx = VNI->def;
1414    if (DefIdx == ~1U)
1415      continue; // Dead val#.
1416    // Is the def for the val# rematerializable?
1417    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1418      ? 0 : getInstructionFromIndex(DefIdx);
1419    bool dummy;
1420    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1421      // Remember how to remat the def of this val#.
1422      ReMatOrigDefs[VN] = ReMatDefMI;
1423      // Original def may be modified so we have to make a copy here. vrm must
1424      // delete these!
1425      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1426
1427      bool CanDelete = true;
1428      if (VNI->hasPHIKill) {
1429        // A kill is a phi node, not all of its uses can be rematerialized.
1430        // It must not be deleted.
1431        CanDelete = false;
1432        // Need a stack slot if there is any live range where uses cannot be
1433        // rematerialized.
1434        NeedStackSlot = true;
1435      }
1436      if (CanDelete)
1437        ReMatDelete.set(VN);
1438    } else {
1439      // Need a stack slot if there is any live range where uses cannot be
1440      // rematerialized.
1441      NeedStackSlot = true;
1442    }
1443  }
1444
1445  // One stack slot per live interval.
1446  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1447    Slot = vrm.assignVirt2StackSlot(li.reg);
1448
1449  // Create new intervals and rewrite defs and uses.
1450  for (LiveInterval::Ranges::const_iterator
1451         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1452    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1453    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1454    bool DefIsReMat = ReMatDefMI != NULL;
1455    bool CanDelete = ReMatDelete[I->valno->id];
1456    int LdSlot = 0;
1457    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1458    bool isLoad = isLoadSS ||
1459      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1460    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1461                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1462                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1463                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1464                               MBBVRegsMap, NewLIs);
1465  }
1466
1467  // Insert spills / restores if we are splitting.
1468  if (!TrySplit)
1469    return NewLIs;
1470
1471  SmallPtrSet<LiveInterval*, 4> AddedKill;
1472  SmallVector<unsigned, 2> Ops;
1473  if (NeedStackSlot) {
1474    int Id = SpillMBBs.find_first();
1475    while (Id != -1) {
1476      std::vector<SRInfo> &spills = SpillIdxes[Id];
1477      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1478        int index = spills[i].index;
1479        unsigned VReg = spills[i].vreg;
1480        LiveInterval &nI = getOrCreateInterval(VReg);
1481        bool isReMat = vrm.isReMaterialized(VReg);
1482        MachineInstr *MI = getInstructionFromIndex(index);
1483        bool CanFold = false;
1484        bool FoundUse = false;
1485        Ops.clear();
1486        if (spills[i].canFold) {
1487          CanFold = true;
1488          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1489            MachineOperand &MO = MI->getOperand(j);
1490            if (!MO.isRegister() || MO.getReg() != VReg)
1491              continue;
1492
1493            Ops.push_back(j);
1494            if (MO.isDef())
1495              continue;
1496            if (isReMat ||
1497                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1498                                                RestoreMBBs, RestoreIdxes))) {
1499              // MI has two-address uses of the same register. If the use
1500              // isn't the first and only use in the BB, then we can't fold
1501              // it. FIXME: Move this to rewriteInstructionsForSpills.
1502              CanFold = false;
1503              break;
1504            }
1505            FoundUse = true;
1506          }
1507        }
1508        // Fold the store into the def if possible.
1509        bool Folded = false;
1510        if (CanFold && !Ops.empty()) {
1511          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1512            Folded = true;
1513            if (FoundUse > 0) {
1514              // Also folded uses, do not issue a load.
1515              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1516              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1517            }
1518            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1519          }
1520        }
1521
1522        // Else tell the spiller to issue a spill.
1523        if (!Folded) {
1524          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1525          bool isKill = LR->end == getStoreIndex(index);
1526          vrm.addSpillPoint(VReg, isKill, MI);
1527          if (isKill)
1528            AddedKill.insert(&nI);
1529        }
1530      }
1531      Id = SpillMBBs.find_next(Id);
1532    }
1533  }
1534
1535  int Id = RestoreMBBs.find_first();
1536  while (Id != -1) {
1537    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1538    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1539      int index = restores[i].index;
1540      if (index == -1)
1541        continue;
1542      unsigned VReg = restores[i].vreg;
1543      LiveInterval &nI = getOrCreateInterval(VReg);
1544      MachineInstr *MI = getInstructionFromIndex(index);
1545      bool CanFold = false;
1546      Ops.clear();
1547      if (restores[i].canFold) {
1548        CanFold = true;
1549        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1550          MachineOperand &MO = MI->getOperand(j);
1551          if (!MO.isRegister() || MO.getReg() != VReg)
1552            continue;
1553
1554          if (MO.isDef()) {
1555            // If this restore were to be folded, it would have been folded
1556            // already.
1557            CanFold = false;
1558            break;
1559          }
1560          Ops.push_back(j);
1561        }
1562      }
1563
1564      // Fold the load into the use if possible.
1565      bool Folded = false;
1566      if (CanFold && !Ops.empty()) {
1567        if (!vrm.isReMaterialized(VReg))
1568          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1569        else {
1570          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1571          int LdSlot = 0;
1572          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1573          // If the rematerializable def is a load, also try to fold it.
1574          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1575            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1576                                          Ops, isLoadSS, LdSlot, VReg);
1577          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1578          if (ImpUse) {
1579            // Re-matting an instruction with virtual register use. Add the
1580            // register as an implicit use on the use MI and update the register
1581            // interval's spill weight.
1582            unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1583            LiveInterval &ImpLi = getInterval(ImpUse);
1584            ImpLi.weight +=
1585              getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
1586
1587            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1588          }
1589        }
1590      }
1591      // If folding is not possible / failed, then tell the spiller to issue a
1592      // load / rematerialization for us.
1593      if (Folded)
1594        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1595      else
1596        vrm.addRestorePoint(VReg, MI);
1597    }
1598    Id = RestoreMBBs.find_next(Id);
1599  }
1600
1601  // Finalize intervals: add kills, finalize spill weights, and filter out
1602  // dead intervals.
1603  std::vector<LiveInterval*> RetNewLIs;
1604  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1605    LiveInterval *LI = NewLIs[i];
1606    if (!LI->empty()) {
1607      LI->weight /= LI->getSize();
1608      if (!AddedKill.count(LI)) {
1609        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1610        unsigned LastUseIdx = getBaseIndex(LR->end);
1611        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1612        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1613        assert(UseIdx != -1);
1614        if (LastUse->getOperand(UseIdx).isImplicit() ||
1615            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1616          LastUse->getOperand(UseIdx).setIsKill();
1617          vrm.addKillPoint(LI->reg, LastUseIdx);
1618        }
1619      }
1620      RetNewLIs.push_back(LI);
1621    }
1622  }
1623
1624  return RetNewLIs;
1625}
1626