LiveIntervalAnalysis.cpp revision c2855324fb3a9a48f9c0f8311feb5e1de971aa1f
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
namespace {
  // Hidden options for help debugging.
  // Globally disables rematerialization of register definitions (values are
  // spilled/reloaded instead).  Checked in isReMaterializable below.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  // Whether to split live intervals at basic-block boundaries (on by default).
  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  // Cap on the number of interval splits; -1 presumably means "no limit"
  // (the consuming code is not visible in this chunk -- confirm there).
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}
49
// Pass statistics, reported when -stats is given.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Unique pass identity object; its address identifies this pass.
char LiveIntervals::ID = 0;
namespace {
  // Register the pass so it is available as -liveintervals.
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}
59
60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61  AU.addPreserved<LiveVariables>();
62  AU.addRequired<LiveVariables>();
63  AU.addPreservedID(MachineLoopInfoID);
64  AU.addPreservedID(MachineDominatorsID);
65  AU.addPreservedID(PHIEliminationID);
66  AU.addRequiredID(PHIEliminationID);
67  AU.addRequiredID(TwoAddressInstructionPassID);
68  MachineFunctionPass::getAnalysisUsage(AU);
69}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
76  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
/// runOnMachineFunction - Register allocate the whole function
///
/// Numbers every instruction and basic block, computes the live intervals,
/// and updates the interval statistics.  Always returns true (the pass
/// populates analysis state; it does not claim to leave the IR untouched).
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, target hooks and required analyses used throughout.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel value (~0U,~0U).
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  // Assign each instruction an index, stepping by InstrSlots::NUM so there
  // are spare slots between instructions (used to distinguish use/def slots).
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Keep Idx2MBBMap sorted by start index so it can be binary-searched
  // (see findLiveInMBBs / intervalIsInOneMBB).
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  // Dump the computed intervals to the debug stream.
  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
130
131/// print - Implement the dump method.
132void LiveIntervals::print(std::ostream &O, const Module* ) const {
133  O << "********** INTERVALS **********\n";
134  for (const_iterator I = begin(), E = end(); I != E; ++I) {
135    I->second.print(DOUT, tri_);
136    DOUT << "\n";
137  }
138
139  O << "********** MACHINEINSTRS **********\n";
140  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
141       mbbi != mbbe; ++mbbi) {
142    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
143    for (MachineBasicBlock::iterator mii = mbbi->begin(),
144           mie = mbbi->end(); mii != mie; ++mii) {
145      O << getInstructionIndex(mii) << '\t' << *mii;
146    }
147  }
148}
149
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction index covered by each range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // Moves to or from li.reg itself are not considered conflicts.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      // Check every register operand of MI (uses and defs alike) for an
      // overlap with 'reg', mapping virtual registers through their current
      // physical assignment in the VirtRegMap.
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          // A virtual register with no physical assignment yet cannot
          // conflict.
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
189
190void LiveIntervals::printRegName(unsigned reg) const {
191  if (TargetRegisterInfo::isPhysicalRegister(reg))
192    cerr << tri_->getName(reg);
193  else
194    cerr << "%reg" << reg;
195}
196
/// handleVirtualRegisterDef - Extend 'interval' for the definition of a
/// virtual register by instruction 'mi' (at index MIIdx) in block 'mbb',
/// using the LiveVariables kill/alive-block information to decide where the
/// value dies.  Handles first definitions as well as redefinitions arising
/// from two-address lowering and PHI elimination.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record the defining instruction on the value number if it is a copy,
    // so coalescing can later trace where the value came from.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Defining instruction is its own kill: give it a unit-length range.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // As above, remember copy instructions on the value number.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
385
/// handlePhysicalRegisterDef - Add a live range for a physical register
/// defined by 'mi' (at index MIIdx).  CopyMI, if non-null, is the copy
/// instruction that produced the value and is recorded on the value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
446
447void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
448                                      MachineBasicBlock::iterator MI,
449                                      unsigned MIIdx,
450                                      unsigned reg) {
451  if (TargetRegisterInfo::isVirtualRegister(reg))
452    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
453  else if (allocatableRegs_[reg]) {
454    MachineInstr *CopyMI = NULL;
455    unsigned SrcReg, DstReg;
456    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457        tii_->isMoveInstr(*MI, SrcReg, DstReg))
458      CopyMI = MI;
459    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
460    // Def of a register also defines its sub-registers.
461    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
462      // Avoid processing some defs more than once.
463      if (!MI->findRegisterDefOperand(*AS))
464        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
465  }
466}
467
/// handleLiveInRegister - Add a live range for a register that is live into
/// MBB (starting at index MIIdx).  isAlias is true when the register is only
/// being processed as a sub-register alias of an explicit live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // Aliased live-ins with no use are treated as dead on entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Otherwise the value lives through the whole block.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
515
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Only create intervals for sub-registers that do not already have
      // one, and mark them as aliases (isAlias = true).
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // Operands are walked in reverse so implicit defs (which come last)
      // are processed before explicit ones -- TODO confirm that ordering
      // is why the loop is reversed; it is not stated here.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}
559
560bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
561                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
562  std::vector<IdxMBBPair>::const_iterator I =
563    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
564
565  bool ResVal = false;
566  while (I != Idx2MBBMap.end()) {
567    if (LR.end <= I->first)
568      break;
569    MBBs.push_back(I->second);
570    ResVal = true;
571    ++I;
572  }
573  return ResVal;
574}
575
576
577LiveInterval LiveIntervals::createInterval(unsigned reg) {
578  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
579                       HUGE_VALF : 0.0F;
580  return LiveInterval(reg, Weight);
581}
582
583/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
584/// copy field and returns the source register that defines it.
585unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
586  if (!VNI->copy)
587    return 0;
588
589  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
590    return VNI->copy->getOperand(1).getReg();
591  unsigned SrcReg, DstReg;
592  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
593    return SrcReg;
594  assert(0 && "Unrecognized copy instruction!");
595  return 0;
596}
597
598//===----------------------------------------------------------------------===//
599// Register allocator hooks.
600//
601
602/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
603/// allow one) virtual register operand, then its uses are implicitly using
604/// the register. Returns the virtual register.
605unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
606                                            MachineInstr *MI) const {
607  unsigned RegOp = 0;
608  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
609    MachineOperand &MO = MI->getOperand(i);
610    if (!MO.isRegister() || !MO.isUse())
611      continue;
612    unsigned Reg = MO.getReg();
613    if (Reg == 0 || Reg == li.reg)
614      continue;
615    // FIXME: For now, only remat MI with at most one register operand.
616    assert(!RegOp &&
617           "Can't rematerialize instruction with multiple register operand!");
618    RegOp = MO.getReg();
619    break;
620  }
621  return RegOp;
622}
623
624/// isValNoAvailableAt - Return true if the val# of the specified interval
625/// which reaches the given instruction also reaches the specified use index.
626bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
627                                       unsigned UseIdx) const {
628  unsigned Index = getInstructionIndex(MI);
629  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
630  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
631  return UI != li.end() && UI->valno == ValNo;
632}
633
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  Sets isLoad when
/// the rematerializable definition is a load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Honor the -disable-rematerialization flag.
  if (DisableReMat)
    return false;

  isLoad = false;
  const TargetInstrDesc &TID = MI->getDesc();
  // IMPLICIT_DEF produces no real value; always rematerializable.
  if (TID.isImplicitDef())
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx)) {
    // This is a load from fixed stack slot. It can be rematerialized unless
    // it's re-defined by a two-address instruction.
    isLoad = true;
    for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
         i != e; ++i) {
      const VNInfo *VNI = *i;
      if (VNI == ValNo)
        continue;
      unsigned DefIdx = VNI->def;
      if (DefIdx == ~1U)
        continue; // Dead val#.
      // ~0u marks a def with no defining instruction (e.g. live-in).
      MachineInstr *DefMI = (DefIdx == ~0u)
        ? NULL : getInstructionFromIndex(DefIdx);
      if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
        isLoad = false;
        return false;
      }
    }
    return true;
  }

  if (tii_->isTriviallyReMaterializable(MI)) {
    isLoad = TID.isSimpleLoad();

    // If MI implicitly uses another virtual register, that register's value
    // must also be available at every place this val# is used.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this val# matter.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!canFoldMemoryOperand(UseMI, li.reg) &&
            !isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
693
694/// isReMaterializable - Returns true if every definition of MI of every
695/// val# of the specified interval is re-materializable.
696bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
697  isLoad = false;
698  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
699       i != e; ++i) {
700    const VNInfo *VNI = *i;
701    unsigned DefIdx = VNI->def;
702    if (DefIdx == ~1U)
703      continue; // Dead val#.
704    // Is the def for the val# rematerializable?
705    if (DefIdx == ~0u)
706      return false;
707    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
708    bool DefIsLoad = false;
709    if (!ReMatDefMI ||
710        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
711      return false;
712    isLoad |= DefIsLoad;
713  }
714  return true;
715}
716
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  unsigned MRInfo = 0;
  const TargetInstrDesc &TID = MI->getDesc();
  // If it is an implicit def instruction, just delete it.
  if (TID.isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Classify each requested operand as mod and/or ref, and collect the
  // indices the target folder should consider.
  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return false;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  // Fold either a stack slot reference (isSS) or the rematerialized DefMI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    // Rewire the index maps from the old instruction to the folded one,
    // then splice the new instruction in place of the old.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
781
782/// canFoldMemoryOperand - Returns true if the specified load / store
783/// folding is possible.
784bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
785                                         SmallVector<unsigned, 2> &Ops) const {
786  SmallVector<unsigned, 2> FoldOps;
787  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
788    unsigned OpIdx = Ops[i];
789    // FIXME: fold subreg use.
790    if (MI->getOperand(OpIdx).getSubReg())
791      return false;
792    FoldOps.push_back(OpIdx);
793  }
794
795  return tii_->canFoldMemoryOperand(MI, FoldOps);
796}
797
798bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, unsigned Reg) const {
799  SmallVector<unsigned, 2> FoldOps;
800  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
801    MachineOperand& mop = MI->getOperand(i);
802    if (!mop.isRegister())
803      continue;
804    unsigned UseReg = mop.getReg();
805    if (UseReg != Reg)
806      continue;
807    // FIXME: fold subreg use.
808    if (mop.getSubReg())
809      return false;
810    FoldOps.push_back(i);
811  }
812  return tii_->canFoldMemoryOperand(MI, FoldOps);
813}
814
815bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
816  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
817  for (LiveInterval::Ranges::const_iterator
818         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
819    std::vector<IdxMBBPair>::const_iterator II =
820      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
821    if (II == Idx2MBBMap.end())
822      continue;
823    if (I->end > II->first)  // crossing a MBB.
824      return false;
825    MBBs.insert(II->second);
826    if (MBBs.size() > 1)
827      return false;
828  }
829  return true;
830}
831
832/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
833/// interval on to-be re-materialized operands of MI) with new register.
834void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
835                                       MachineInstr *MI, unsigned NewVReg,
836                                       VirtRegMap &vrm) {
837  // There is an implicit use. That means one of the other operand is
838  // being remat'ed and the remat'ed instruction has li.reg as an
839  // use operand. Make sure we rewrite that as well.
840  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
841    MachineOperand &MO = MI->getOperand(i);
842    if (!MO.isRegister())
843      continue;
844    unsigned Reg = MO.getReg();
845    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
846      continue;
847    if (!vrm.isReMaterialized(Reg))
848      continue;
849    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
850    int OpIdx = ReMatMI->findRegisterUseOperandIdx(li.reg);
851    if (OpIdx != -1)
852      ReMatMI->getOperand(OpIdx).setReg(NewVReg);
853  }
854}
855
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// rewriteInstructionForSpills rewrites every operand of MI that refers to
/// li.reg to use NewVReg, creating NewVReg (and its LiveInterval nI) if
/// NewVReg is 0, and extending nI to cover the rewritten def / use slots at
/// 'index'. When folding is legal it may instead fold the load / store into
/// MI, replacing the instruction entirely. HasDef / HasUse report whether a
/// def and/or use of li.reg was rewritten. The return value is the CanFold
/// flag computed for the (last) rewritten operand group — only meaningful to
/// the caller when TrySplit is true.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  // Folding can change MI in unpredictable ways; a successful fold jumps
  // back here to rescan the operands from the beginning.
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    // Only rewrite virtual-register operands that refer to li.reg.
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        unsigned ImpUse = getReMatImplicitUse(li, MI);
        if (ImpUse) {
          // To be deleted MI has a virtual register operand, update the
          // spill weight of the register interval.
          unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
          LiveInterval &ImpLi = getInterval(ImpUse);
          ImpLi.weight -=
            getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
        }
        // Drop MI from both the analysis maps and its parent block.
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    // Collect the remaining operands of MI that also refer to this register.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        // Splitting: only record whether the fold would be possible.
        CanFold = canFoldMemoryOperand(MI, Ops);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover from the load slot to just past the use slot.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // Defs get a range from the def slot to the store slot.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1054bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1055                                   const VNInfo *VNI,
1056                                   MachineBasicBlock *MBB, unsigned Idx) const {
1057  unsigned End = getMBBEndIdx(MBB);
1058  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1059    unsigned KillIdx = VNI->kills[j];
1060    if (KillIdx > Idx && KillIdx < End)
1061      return true;
1062  }
1063  return false;
1064}
1065
1066static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1067  const VNInfo *VNI = NULL;
1068  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1069         e = li.vni_end(); i != e; ++i)
1070    if ((*i)->def == DefIdx) {
1071      VNI = *i;
1072      break;
1073    }
1074  return VNI;
1075}
1076
1077/// RewriteInfo - Keep track of machine instrs that will be rewritten
1078/// during spilling.
1079struct RewriteInfo {
1080  unsigned Index;
1081  MachineInstr *MI;
1082  bool HasUse;
1083  bool HasDef;
1084  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1085    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1086};
1087
1088struct RewriteInfoCompare {
1089  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1090    return LHS.Index < RHS.Index;
1091  }
1092};
1093
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range [I->start, I->end). Each affected instruction is handed to
/// rewriteInstructionForSpills; when TrySplit is set, per-MBB spill and
/// restore points are recorded in SpillIdxes / RestoreIdxes (with the
/// corresponding bits in SpillMBBs / RestoreMBBs) so that
/// addIntervalsForSpills can later place the actual loads and stores.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range endpoints to whole-instruction boundaries.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &(*ri);
    MachineOperand &O = ri.getOperand();
    // Advance the iterator before any rewriting can invalidate it.
    ++ri;
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    // Coalesce consecutive entries for the same instruction into one.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight.
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight +=
        getSpillWeight(false, true, loopDepth) * NumUses / ImpLi.getSize();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // Look for a new vreg already created for this MBB so it can be reused.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing was rewritten in MI (e.g. the remat def itself was deleted).
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def in the MBB: a spill will be needed.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1296
1297bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1298                        BitVector &RestoreMBBs,
1299                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1300  if (!RestoreMBBs[Id])
1301    return false;
1302  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1303  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1304    if (Restores[i].index == index &&
1305        Restores[i].vreg == vr &&
1306        Restores[i].canFold)
1307      return true;
1308  return false;
1309}
1310
1311void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1312                        BitVector &RestoreMBBs,
1313                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1314  if (!RestoreMBBs[Id])
1315    return;
1316  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1317  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1318    if (Restores[i].index == index && Restores[i].vreg)
1319      Restores[i].index = -1;
1320}
1321
1322
1323std::vector<LiveInterval*> LiveIntervals::
1324addIntervalsForSpills(const LiveInterval &li,
1325                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1326  // Since this is called after the analysis is done we don't know if
1327  // LiveVariables is available
1328  lv_ = getAnalysisToUpdate<LiveVariables>();
1329
1330  assert(li.weight != HUGE_VALF &&
1331         "attempt to spill already spilled interval!");
1332
1333  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1334  li.print(DOUT, tri_);
1335  DOUT << '\n';
1336
1337  // Each bit specify whether it a spill is required in the MBB.
1338  BitVector SpillMBBs(mf_->getNumBlockIDs());
1339  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1340  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1341  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1342  std::map<unsigned,unsigned> MBBVRegsMap;
1343  std::vector<LiveInterval*> NewLIs;
1344  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1345
1346  unsigned NumValNums = li.getNumValNums();
1347  SmallVector<MachineInstr*, 4> ReMatDefs;
1348  ReMatDefs.resize(NumValNums, NULL);
1349  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1350  ReMatOrigDefs.resize(NumValNums, NULL);
1351  SmallVector<int, 4> ReMatIds;
1352  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1353  BitVector ReMatDelete(NumValNums);
1354  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1355
1356  // Spilling a split live interval. It cannot be split any further. Also,
1357  // it's also guaranteed to be a single val# / range interval.
1358  if (vrm.getPreSplitReg(li.reg)) {
1359    vrm.setIsSplitFromReg(li.reg, 0);
1360    // Unset the split kill marker on the last use.
1361    unsigned KillIdx = vrm.getKillPoint(li.reg);
1362    if (KillIdx) {
1363      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1364      assert(KillMI && "Last use disappeared?");
1365      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1366      assert(KillOp != -1 && "Last use disappeared?");
1367      KillMI->getOperand(KillOp).setIsKill(false);
1368    }
1369    vrm.removeKillPoint(li.reg);
1370    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1371    Slot = vrm.getStackSlot(li.reg);
1372    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1373    MachineInstr *ReMatDefMI = DefIsReMat ?
1374      vrm.getReMaterializedMI(li.reg) : NULL;
1375    int LdSlot = 0;
1376    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1377    bool isLoad = isLoadSS ||
1378      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1379    bool IsFirstRange = true;
1380    for (LiveInterval::Ranges::const_iterator
1381           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1382      // If this is a split live interval with multiple ranges, it means there
1383      // are two-address instructions that re-defined the value. Only the
1384      // first def can be rematerialized!
1385      if (IsFirstRange) {
1386        // Note ReMatOrigDefMI has already been deleted.
1387        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1388                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1389                             false, vrm, rc, ReMatIds, loopInfo,
1390                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1391                             MBBVRegsMap, NewLIs);
1392      } else {
1393        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1394                             Slot, 0, false, false, false,
1395                             false, vrm, rc, ReMatIds, loopInfo,
1396                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1397                             MBBVRegsMap, NewLIs);
1398      }
1399      IsFirstRange = false;
1400    }
1401    return NewLIs;
1402  }
1403
1404  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1405  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1406    TrySplit = false;
1407  if (TrySplit)
1408    ++numSplits;
1409  bool NeedStackSlot = false;
1410  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1411       i != e; ++i) {
1412    const VNInfo *VNI = *i;
1413    unsigned VN = VNI->id;
1414    unsigned DefIdx = VNI->def;
1415    if (DefIdx == ~1U)
1416      continue; // Dead val#.
1417    // Is the def for the val# rematerializable?
1418    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1419      ? 0 : getInstructionFromIndex(DefIdx);
1420    bool dummy;
1421    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1422      // Remember how to remat the def of this val#.
1423      ReMatOrigDefs[VN] = ReMatDefMI;
1424      // Original def may be modified so we have to make a copy here. vrm must
1425      // delete these!
1426      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1427
1428      bool CanDelete = true;
1429      if (VNI->hasPHIKill) {
1430        // A kill is a phi node, not all of its uses can be rematerialized.
1431        // It must not be deleted.
1432        CanDelete = false;
1433        // Need a stack slot if there is any live range where uses cannot be
1434        // rematerialized.
1435        NeedStackSlot = true;
1436      }
1437      if (CanDelete)
1438        ReMatDelete.set(VN);
1439    } else {
1440      // Need a stack slot if there is any live range where uses cannot be
1441      // rematerialized.
1442      NeedStackSlot = true;
1443    }
1444  }
1445
1446  // One stack slot per live interval.
1447  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1448    Slot = vrm.assignVirt2StackSlot(li.reg);
1449
1450  // Create new intervals and rewrite defs and uses.
1451  for (LiveInterval::Ranges::const_iterator
1452         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1453    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1454    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1455    bool DefIsReMat = ReMatDefMI != NULL;
1456    bool CanDelete = ReMatDelete[I->valno->id];
1457    int LdSlot = 0;
1458    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1459    bool isLoad = isLoadSS ||
1460      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1461    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1462                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1463                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1464                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1465                               MBBVRegsMap, NewLIs);
1466  }
1467
1468  // Insert spills / restores if we are splitting.
1469  if (!TrySplit)
1470    return NewLIs;
1471
1472  SmallPtrSet<LiveInterval*, 4> AddedKill;
1473  SmallVector<unsigned, 2> Ops;
1474  if (NeedStackSlot) {
1475    int Id = SpillMBBs.find_first();
1476    while (Id != -1) {
1477      std::vector<SRInfo> &spills = SpillIdxes[Id];
1478      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1479        int index = spills[i].index;
1480        unsigned VReg = spills[i].vreg;
1481        LiveInterval &nI = getOrCreateInterval(VReg);
1482        bool isReMat = vrm.isReMaterialized(VReg);
1483        MachineInstr *MI = getInstructionFromIndex(index);
1484        bool CanFold = false;
1485        bool FoundUse = false;
1486        Ops.clear();
1487        if (spills[i].canFold) {
1488          CanFold = true;
1489          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1490            MachineOperand &MO = MI->getOperand(j);
1491            if (!MO.isRegister() || MO.getReg() != VReg)
1492              continue;
1493
1494            Ops.push_back(j);
1495            if (MO.isDef())
1496              continue;
1497            if (isReMat ||
1498                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1499                                                RestoreMBBs, RestoreIdxes))) {
1500              // MI has two-address uses of the same register. If the use
1501              // isn't the first and only use in the BB, then we can't fold
1502              // it. FIXME: Move this to rewriteInstructionsForSpills.
1503              CanFold = false;
1504              break;
1505            }
1506            FoundUse = true;
1507          }
1508        }
1509        // Fold the store into the def if possible.
1510        bool Folded = false;
1511        if (CanFold && !Ops.empty()) {
1512          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1513            Folded = true;
1514            if (FoundUse > 0) {
1515              // Also folded uses, do not issue a load.
1516              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1517              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1518            }
1519            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1520          }
1521        }
1522
1523        // Else tell the spiller to issue a spill.
1524        if (!Folded) {
1525          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1526          bool isKill = LR->end == getStoreIndex(index);
1527          vrm.addSpillPoint(VReg, isKill, MI);
1528          if (isKill)
1529            AddedKill.insert(&nI);
1530        }
1531      }
1532      Id = SpillMBBs.find_next(Id);
1533    }
1534  }
1535
1536  int Id = RestoreMBBs.find_first();
1537  while (Id != -1) {
1538    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1539    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1540      int index = restores[i].index;
1541      if (index == -1)
1542        continue;
1543      unsigned VReg = restores[i].vreg;
1544      LiveInterval &nI = getOrCreateInterval(VReg);
1545      MachineInstr *MI = getInstructionFromIndex(index);
1546      bool CanFold = false;
1547      Ops.clear();
1548      if (restores[i].canFold) {
1549        CanFold = true;
1550        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1551          MachineOperand &MO = MI->getOperand(j);
1552          if (!MO.isRegister() || MO.getReg() != VReg)
1553            continue;
1554
1555          if (MO.isDef()) {
1556            // If this restore were to be folded, it would have been folded
1557            // already.
1558            CanFold = false;
1559            break;
1560          }
1561          Ops.push_back(j);
1562        }
1563      }
1564
1565      // Fold the load into the use if possible.
1566      bool Folded = false;
1567      if (CanFold && !Ops.empty()) {
1568        if (!vrm.isReMaterialized(VReg))
1569          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1570        else {
1571          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1572          int LdSlot = 0;
1573          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1574          // If the rematerializable def is a load, also try to fold it.
1575          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1576            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1577                                          Ops, isLoadSS, LdSlot, VReg);
1578          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1579          if (ImpUse) {
1580            // Re-matting an instruction with virtual register use. Add the
1581            // register as an implicit use on the use MI and update the register
1582            // interval's spill weight.
1583            unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1584            LiveInterval &ImpLi = getInterval(ImpUse);
1585            ImpLi.weight +=
1586              getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
1587
1588            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1589          }
1590        }
1591      }
1592      // If folding is not possible / failed, then tell the spiller to issue a
1593      // load / rematerialization for us.
1594      if (Folded)
1595        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1596      else
1597        vrm.addRestorePoint(VReg, MI);
1598    }
1599    Id = RestoreMBBs.find_next(Id);
1600  }
1601
1602  // Finalize intervals: add kills, finalize spill weights, and filter out
1603  // dead intervals.
1604  std::vector<LiveInterval*> RetNewLIs;
1605  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1606    LiveInterval *LI = NewLIs[i];
1607    if (!LI->empty()) {
1608      LI->weight /= LI->getSize();
1609      if (!AddedKill.count(LI)) {
1610        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1611        unsigned LastUseIdx = getBaseIndex(LR->end);
1612        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1613        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1614        assert(UseIdx != -1);
1615        if (LastUse->getOperand(UseIdx).isImplicit() ||
1616            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1617          LastUse->getOperand(UseIdx).setIsKill();
1618          vrm.addKillPoint(LI->reg, LastUseIdx);
1619        }
1620      }
1621      RetNewLIs.push_back(LI);
1622    }
1623  }
1624
1625  return RetNewLIs;
1626}
1627