LiveIntervalAnalysis.cpp revision 8892b6f30792bbc56f40d01a4ae9834a55cf8b1a
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Interval-splitting knobs.  SplitAtBB enables splitting intervals at basic
// block boundaries; SplitLimit defaults to -1 (presumably "no limit" --
// confirm at the use site, which is outside this chunk).
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

// Pass statistics, reported under -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Static pass registration.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
55
/// getAnalysisUsage - Declare this pass's prerequisites and the analyses it
/// leaves intact.  LiveVariables, PHI elimination and the two-address pass
/// must have run before this pass; LiveVariables, loop info, dominators and
/// PHI elimination survive it unchanged.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
66
67void LiveIntervals::releaseMemory() {
68  MBB2IdxMap.clear();
69  Idx2MBBMap.clear();
70  mi2iMap_.clear();
71  i2miMap_.clear();
72  r2iMap_.clear();
73  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
74  VNInfoAllocator.Reset();
75  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
76    delete ClonedMIs[i];
77}
78
79void LiveIntervals::computeNumbering() {
80  Index2MiMap OldI2MI = i2miMap_;
81
82  Idx2MBBMap.clear();
83  MBB2IdxMap.clear();
84  mi2iMap_.clear();
85  i2miMap_.clear();
86
87  // Number MachineInstrs and MachineBasicBlocks.
88  // Initialize MBB indexes to a sentinal.
89  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
90
91  unsigned MIIndex = 0;
92  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
93       MBB != E; ++MBB) {
94    unsigned StartIdx = MIIndex;
95
96    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
97         I != E; ++I) {
98      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
99      assert(inserted && "multiple MachineInstr -> index mappings");
100      i2miMap_.push_back(I);
101      MIIndex += InstrSlots::NUM;
102    }
103
104    if (StartIdx == MIIndex) {
105      // Empty MBB
106      MIIndex += InstrSlots::NUM;
107      i2miMap_.push_back(0);
108    }
109    // Set the MBB2IdxMap entry for this MBB.
110    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
111    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
112  }
113  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
114
115  if (!OldI2MI.empty())
116    for (iterator I = begin(), E = end(); I != E; ++I)
117      for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
118           LI != LE; ++LI) {
119
120        // Remap the start index of the live range to the corresponding new
121        // number, or our best guess at what it _should_ correspond to if the
122        // original instruction has been erased.  This is either the following
123        // instruction or its predecessor.
124        unsigned offset = LI->start % InstrSlots::NUM;
125        if (OldI2MI[LI->start / InstrSlots::NUM])
126          LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
127        else {
128          unsigned i = 0;
129          MachineInstr* newInstr = 0;
130          do {
131            newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
132            i++;
133          } while (!newInstr);
134
135          if (mi2iMap_[newInstr] ==
136              MBB2IdxMap[newInstr->getParent()->getNumber()].first)
137            LI->start = mi2iMap_[newInstr];
138          else
139            LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
140        }
141
142        // Remap the ending index in the same way that we remapped the start,
143        // except for the final step where we always map to the immediately
144        // following instruction.
145        if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
146          offset = LI->end % InstrSlots::NUM;
147          if (OldI2MI[LI->end / InstrSlots::NUM])
148            LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
149          else {
150            unsigned i = 0;
151            MachineInstr* newInstr = 0;
152            do {
153              newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
154              i++;
155            } while (!newInstr);
156
157            LI->end = mi2iMap_[newInstr];
158          }
159        } else {
160          LI->end = i2miMap_.size() * InstrSlots::NUM;
161        }
162
163        // Remap the VNInfo def index, which works the same as the
164        // start indices above.
165        VNInfo* vni = LI->valno;
166        offset = vni->def % InstrSlots::NUM;
167        if (OldI2MI[vni->def / InstrSlots::NUM])
168          vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
169        else {
170          unsigned i = 0;
171          MachineInstr* newInstr = 0;
172          do {
173            newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
174            i++;
175          } while (!newInstr);
176
177          if (mi2iMap_[newInstr] ==
178              MBB2IdxMap[newInstr->getParent()->getNumber()].first)
179            vni->def = mi2iMap_[newInstr];
180          else
181            vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
182        }
183
184        // Remap the VNInfo kill indices, which works the same as
185        // the end indices above.
186        for (size_t i = 0; i < vni->kills.size(); ++i) {
187          offset = vni->kills[i] % InstrSlots::NUM;
188          if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
189            vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
190                            offset;
191          else {
192            unsigned e = 0;
193            MachineInstr* newInstr = 0;
194            do {
195              newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
196              e++;
197            } while (!newInstr);
198
199            vni->kills[i] = mi2iMap_[newInstr];
200          }
201        }
202      }
203}
204
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the per-function objects used throughout the analysis.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number all instructions/blocks, then build the live intervals on top of
  // that numbering.
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  // NOTE(review): no coalescing happens in this function, so this counts the
  // same value as numIntervals above; coalescing presumably occurs elsewhere.
  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  // Always reports a change: the analysis rebuilds its maps/intervals.
  return true;
}
231
232/// print - Implement the dump method.
233void LiveIntervals::print(std::ostream &O, const Module* ) const {
234  O << "********** INTERVALS **********\n";
235  for (const_iterator I = begin(), E = end(); I != E; ++I) {
236    I->second.print(O, tri_);
237    O << "\n";
238  }
239
240  O << "********** MACHINEINSTRS **********\n";
241  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
242       mbbi != mbbe; ++mbbi) {
243    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
244    for (MachineBasicBlock::iterator mii = mbbi->begin(),
245           mie = mbbi->end(); mii != mie; ++mii) {
246      O << getInstructionIndex(mii) << '\t' << *mii;
247    }
248  }
249}
250
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by each range of the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // Copies into or out of the interval's own register don't conflict.
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      // Check every register operand of the instruction against 'reg'.
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual registers through their current physical assignment,
        // skipping unassigned ones.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
290
291void LiveIntervals::printRegName(unsigned reg) const {
292  if (TargetRegisterInfo::isPhysicalRegister(reg))
293    cerr << tri_->getName(reg);
294  else
295    cerr << "%reg" << reg;
296}
297
/// handleVirtualRegisterDef - Extend 'interval' for a definition of a
/// virtual register by instruction 'mi' (at index MIIdx) in block 'mbb'.
/// Handles the first definition of a vreg as well as redefinitions arising
/// from two-address rewriting and PHI elimination.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // IMPLICIT_DEF contributes no live range.
  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record copy-like defining instructions in the value number so later
    // phases can trace the copy source.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Defining instruction is its own kill: give it a one-slot range.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // As above, record copy-like defining instructions in the value number.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
493
/// handlePhysicalRegisterDef - Extend 'interval' for a physical register
/// defined by 'mi' (at index MIIdx) in 'MBB'.  CopyMI, if non-null, is the
/// copy-like instruction performing the def and is recorded in any new
/// value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveRange::iterator-style lookup: reuse the existing value number if a
  // range already covers 'start'; otherwise mint a new one recording CopyMI.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
554
555void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
556                                      MachineBasicBlock::iterator MI,
557                                      unsigned MIIdx,
558                                      unsigned reg) {
559  if (TargetRegisterInfo::isVirtualRegister(reg))
560    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
561  else if (allocatableRegs_[reg]) {
562    MachineInstr *CopyMI = NULL;
563    unsigned SrcReg, DstReg;
564    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
565        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
566        tii_->isMoveInstr(*MI, SrcReg, DstReg))
567      CopyMI = MI;
568    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
569    // Def of a register also defines its sub-registers.
570    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
571      // If MI also modifies the sub-register explicitly, avoid processing it
572      // more than once. Do not pass in TRI here so it checks for exact match.
573      if (!MI->modifiesRegister(*AS))
574        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
575  }
576}
577
/// handleLiveInRegister - Build the live range for a register that is
/// live-in to 'MBB' (starting at index MIIdx).  'isAlias' is true when the
/// register is only live-in as an alias of an explicitly listed live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // Alias live-ins that are never read get a one-slot dead range.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Otherwise treat it as live through the end of the scanned region.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
625
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.  This mirrors the
  // numbering produced by computeNumbering(), including the extra quantum
  // given to empty blocks at the bottom of the loop.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }

    if (MBB->begin() == miEnd) MIIndex += InstrSlots::NUM; // Empty MBB
  }
}
671
672bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
673                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
674  std::vector<IdxMBBPair>::const_iterator I =
675    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
676
677  bool ResVal = false;
678  while (I != Idx2MBBMap.end()) {
679    if (LR.end <= I->first)
680      break;
681    MBBs.push_back(I->second);
682    ResVal = true;
683    ++I;
684  }
685  return ResVal;
686}
687
688
689LiveInterval LiveIntervals::createInterval(unsigned reg) {
690  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
691                       HUGE_VALF : 0.0F;
692  return LiveInterval(reg, Weight);
693}
694
695/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
696/// copy field and returns the source register that defines it.
697unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
698  if (!VNI->copy)
699    return 0;
700
701  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
702    return VNI->copy->getOperand(1).getReg();
703  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
704    return VNI->copy->getOperand(2).getReg();
705  unsigned SrcReg, DstReg;
706  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
707    return SrcReg;
708  assert(0 && "Unrecognized copy instruction!");
709  return 0;
710}
711
712//===----------------------------------------------------------------------===//
713// Register allocator hooks.
714//
715
716/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
717/// allow one) virtual register operand, then its uses are implicitly using
718/// the register. Returns the virtual register.
719unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
720                                            MachineInstr *MI) const {
721  unsigned RegOp = 0;
722  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
723    MachineOperand &MO = MI->getOperand(i);
724    if (!MO.isRegister() || !MO.isUse())
725      continue;
726    unsigned Reg = MO.getReg();
727    if (Reg == 0 || Reg == li.reg)
728      continue;
729    // FIXME: For now, only remat MI with at most one register operand.
730    assert(!RegOp &&
731           "Can't rematerialize instruction with multiple register operand!");
732    RegOp = MO.getReg();
733    break;
734  }
735  return RegOp;
736}
737
738/// isValNoAvailableAt - Return true if the val# of the specified interval
739/// which reaches the given instruction also reaches the specified use index.
740bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
741                                       unsigned UseIdx) const {
742  unsigned Index = getInstructionIndex(MI);
743  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
744  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
745  return UI != li.end() && UI->valno == ValNo;
746}
747
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  Sets isLoad when
/// the rematerializable definition is a load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Debugging escape hatch: -disable-rematerialization.
  if (DisableReMat)
    return false;

  isLoad = false;
  // IMPLICIT_DEF is trivially rematerializable.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    // If the remat def reads another virtual register, that register's value
    // must still be available at every use of li's value number.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this particular value number matter.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
791
792/// isReMaterializable - Returns true if every definition of MI of every
793/// val# of the specified interval is re-materializable.
794bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
795  isLoad = false;
796  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
797       i != e; ++i) {
798    const VNInfo *VNI = *i;
799    unsigned DefIdx = VNI->def;
800    if (DefIdx == ~1U)
801      continue; // Dead val#.
802    // Is the def for the val# rematerializable?
803    if (DefIdx == ~0u)
804      return false;
805    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
806    bool DefIsLoad = false;
807    if (!ReMatDefMI ||
808        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
809      return false;
810    isLoad |= DefIsLoad;
811  }
812  return true;
813}
814
815/// FilterFoldedOps - Filter out two-address use operands. Return
816/// true if it finds any issue with the operands that ought to prevent
817/// folding.
818static bool FilterFoldedOps(MachineInstr *MI,
819                            SmallVector<unsigned, 2> &Ops,
820                            unsigned &MRInfo,
821                            SmallVector<unsigned, 2> &FoldOps) {
822  const TargetInstrDesc &TID = MI->getDesc();
823
824  MRInfo = 0;
825  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
826    unsigned OpIdx = Ops[i];
827    MachineOperand &MO = MI->getOperand(OpIdx);
828    // FIXME: fold subreg use.
829    if (MO.getSubReg())
830      return true;
831    if (MO.isDef())
832      MRInfo |= (unsigned)VirtRegMap::isMod;
833    else {
834      // Filter out two-address use operand(s).
835      if (!MO.isImplicit() &&
836          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
837        MRInfo = VirtRegMap::isModRef;
838        continue;
839      }
840      MRInfo |= (unsigned)VirtRegMap::isRef;
841    }
842    FoldOps.push_back(OpIdx);
843  }
844  return false;
845}
846
847
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either a stack-slot reference (isSS) or the
  // rematerialized def instruction itself into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer spill / restore / emergency-spill points recorded against
    // the old instruction onto the folded one.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Re-key the index maps so fmi takes over MI's instruction index.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    // Splice fmi into the block in MI's place and hand it back to the
    // caller through the by-reference MI parameter.
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
905
906/// canFoldMemoryOperand - Returns true if the specified load / store
907/// folding is possible.
908bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
909                                         SmallVector<unsigned, 2> &Ops,
910                                         bool ReMat) const {
911  // Filter the list of operand indexes that are to be folded. Abort if
912  // any operand will prevent folding.
913  unsigned MRInfo = 0;
914  SmallVector<unsigned, 2> FoldOps;
915  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
916    return false;
917
918  // It's only legal to remat for a use, not a def.
919  if (ReMat && (MRInfo & VirtRegMap::isMod))
920    return false;
921
922  return tii_->canFoldMemoryOperand(MI, FoldOps);
923}
924
925bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
926  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
927  for (LiveInterval::Ranges::const_iterator
928         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
929    std::vector<IdxMBBPair>::const_iterator II =
930      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
931    if (II == Idx2MBBMap.end())
932      continue;
933    if (I->end > II->first)  // crossing a MBB.
934      return false;
935    MBBs.insert(II->second);
936    if (MBBs.size() > 1)
937      return false;
938  }
939  return true;
940}
941
942/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
943/// interval on to-be re-materialized operands of MI) with new register.
944void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
945                                       MachineInstr *MI, unsigned NewVReg,
946                                       VirtRegMap &vrm) {
947  // There is an implicit use. That means one of the other operand is
948  // being remat'ed and the remat'ed instruction has li.reg as an
949  // use operand. Make sure we rewrite that as well.
950  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
951    MachineOperand &MO = MI->getOperand(i);
952    if (!MO.isRegister())
953      continue;
954    unsigned Reg = MO.getReg();
955    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
956      continue;
957    if (!vrm.isReMaterialized(Reg))
958      continue;
959    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
960    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
961    if (UseMO)
962      UseMO->setReg(NewVReg);
963  }
964}
965
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites every operand of MI that refers to li.reg to use NewVReg
/// (creating it via mri_ if NewVReg is 0), records spill / remat bookkeeping
/// in vrm, and extends the new interval with ranges covering this def and/or
/// use. HasDef / HasUse report what kind of operands were rewritten. Returns
/// true if all rewritten operands could be folded into memory operations
/// (the caller accumulates this to lower the new interval's spill weight).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  // Note: getNumOperands() is re-read every iteration because folding below
  // can replace MI with an instruction that has a different operand count.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    // Only operands of the interval being spilled are rewritten.
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    // Collect the indices of all other operands referring to the same vreg.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
      if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            // Folding deleted the instruction entirely; undo the weight
            // contribution added above.
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the value is live from the reload point through
        // this use.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def is live from its definition slot to the following store.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1168bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1169                                   const VNInfo *VNI,
1170                                   MachineBasicBlock *MBB, unsigned Idx) const {
1171  unsigned End = getMBBEndIdx(MBB);
1172  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1173    unsigned KillIdx = VNI->kills[j];
1174    if (KillIdx > Idx && KillIdx < End)
1175      return true;
1176  }
1177  return false;
1178}
1179
1180/// RewriteInfo - Keep track of machine instrs that will be rewritten
1181/// during spilling.
1182namespace {
1183  struct RewriteInfo {
1184    unsigned Index;
1185    MachineInstr *MI;
1186    bool HasUse;
1187    bool HasDef;
1188    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1189      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1190  };
1191
1192  struct RewriteInfoCompare {
1193    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1194      return LHS.Index < RHS.Index;
1195    }
1196  };
1197}
1198
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside the
/// live range I: collect the affected instructions in index order, rewrite
/// each via rewriteInstructionForSpills, and record per-MBB spill points
/// (SpillMBBs / SpillIdxes) and restore points (RestoreMBBs / RestoreIdxes)
/// for the caller to materialize later.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range out to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before the operand is possibly rewritten out from under us.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // Reuse the vreg already created for this MBB, if any.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    // Nothing was rewritten (e.g. the remat def itself was erased).
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def within the MBB: a spill (store) is
          // needed after the last def.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1401
1402bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1403                        BitVector &RestoreMBBs,
1404                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1405  if (!RestoreMBBs[Id])
1406    return false;
1407  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1408  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1409    if (Restores[i].index == index &&
1410        Restores[i].vreg == vr &&
1411        Restores[i].canFold)
1412      return true;
1413  return false;
1414}
1415
1416void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1417                        BitVector &RestoreMBBs,
1418                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1419  if (!RestoreMBBs[Id])
1420    return;
1421  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1422  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1423    if (Restores[i].index == index && Restores[i].vreg)
1424      Restores[i].index = -1;
1425}
1426
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance the iterator first: the instruction may be erased or the
    // operand rewritten below, which would invalidate it.
    ++ri;
    if (O.isDef()) {
      // Any def of li.reg still around at this point must be an
      // IMPLICIT_DEF; real defs were rewritten by the spiller. Delete it
      // and drop it from the index maps.
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of this instruction that refers to li.reg.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
1460
1461
1462std::vector<LiveInterval*> LiveIntervals::
1463addIntervalsForSpills(const LiveInterval &li,
1464                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1465                      float &SSWeight) {
1466  // Since this is called after the analysis is done we don't know if
1467  // LiveVariables is available
1468  lv_ = getAnalysisToUpdate<LiveVariables>();
1469
1470  assert(li.weight != HUGE_VALF &&
1471         "attempt to spill already spilled interval!");
1472
1473  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1474  li.print(DOUT, tri_);
1475  DOUT << '\n';
1476
1477  // Spill slot weight.
1478  SSWeight = 0.0f;
1479
1480  // Each bit specify whether it a spill is required in the MBB.
1481  BitVector SpillMBBs(mf_->getNumBlockIDs());
1482  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1483  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1484  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1485  std::map<unsigned,unsigned> MBBVRegsMap;
1486  std::vector<LiveInterval*> NewLIs;
1487  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1488
1489  unsigned NumValNums = li.getNumValNums();
1490  SmallVector<MachineInstr*, 4> ReMatDefs;
1491  ReMatDefs.resize(NumValNums, NULL);
1492  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1493  ReMatOrigDefs.resize(NumValNums, NULL);
1494  SmallVector<int, 4> ReMatIds;
1495  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1496  BitVector ReMatDelete(NumValNums);
1497  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1498
1499  // Spilling a split live interval. It cannot be split any further. Also,
1500  // it's also guaranteed to be a single val# / range interval.
1501  if (vrm.getPreSplitReg(li.reg)) {
1502    vrm.setIsSplitFromReg(li.reg, 0);
1503    // Unset the split kill marker on the last use.
1504    unsigned KillIdx = vrm.getKillPoint(li.reg);
1505    if (KillIdx) {
1506      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1507      assert(KillMI && "Last use disappeared?");
1508      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1509      assert(KillOp != -1 && "Last use disappeared?");
1510      KillMI->getOperand(KillOp).setIsKill(false);
1511    }
1512    vrm.removeKillPoint(li.reg);
1513    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1514    Slot = vrm.getStackSlot(li.reg);
1515    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1516    MachineInstr *ReMatDefMI = DefIsReMat ?
1517      vrm.getReMaterializedMI(li.reg) : NULL;
1518    int LdSlot = 0;
1519    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1520    bool isLoad = isLoadSS ||
1521      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1522    bool IsFirstRange = true;
1523    for (LiveInterval::Ranges::const_iterator
1524           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1525      // If this is a split live interval with multiple ranges, it means there
1526      // are two-address instructions that re-defined the value. Only the
1527      // first def can be rematerialized!
1528      if (IsFirstRange) {
1529        // Note ReMatOrigDefMI has already been deleted.
1530        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1531                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1532                             false, vrm, rc, ReMatIds, loopInfo,
1533                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1534                             MBBVRegsMap, NewLIs, SSWeight);
1535      } else {
1536        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1537                             Slot, 0, false, false, false,
1538                             false, vrm, rc, ReMatIds, loopInfo,
1539                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1540                             MBBVRegsMap, NewLIs, SSWeight);
1541      }
1542      IsFirstRange = false;
1543    }
1544
1545    SSWeight = 0.0f;  // Already accounted for when split.
1546    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1547    return NewLIs;
1548  }
1549
1550  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1551  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1552    TrySplit = false;
1553  if (TrySplit)
1554    ++numSplits;
1555  bool NeedStackSlot = false;
1556  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1557       i != e; ++i) {
1558    const VNInfo *VNI = *i;
1559    unsigned VN = VNI->id;
1560    unsigned DefIdx = VNI->def;
1561    if (DefIdx == ~1U)
1562      continue; // Dead val#.
1563    // Is the def for the val# rematerializable?
1564    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1565      ? 0 : getInstructionFromIndex(DefIdx);
1566    bool dummy;
1567    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1568      // Remember how to remat the def of this val#.
1569      ReMatOrigDefs[VN] = ReMatDefMI;
1570      // Original def may be modified so we have to make a copy here. vrm must
1571      // delete these!
1572      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1573
1574      bool CanDelete = true;
1575      if (VNI->hasPHIKill) {
1576        // A kill is a phi node, not all of its uses can be rematerialized.
1577        // It must not be deleted.
1578        CanDelete = false;
1579        // Need a stack slot if there is any live range where uses cannot be
1580        // rematerialized.
1581        NeedStackSlot = true;
1582      }
1583      if (CanDelete)
1584        ReMatDelete.set(VN);
1585    } else {
1586      // Need a stack slot if there is any live range where uses cannot be
1587      // rematerialized.
1588      NeedStackSlot = true;
1589    }
1590  }
1591
1592  // One stack slot per live interval.
1593  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1594    Slot = vrm.assignVirt2StackSlot(li.reg);
1595
1596  // Create new intervals and rewrite defs and uses.
1597  for (LiveInterval::Ranges::const_iterator
1598         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1599    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1600    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1601    bool DefIsReMat = ReMatDefMI != NULL;
1602    bool CanDelete = ReMatDelete[I->valno->id];
1603    int LdSlot = 0;
1604    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1605    bool isLoad = isLoadSS ||
1606      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1607    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1608                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1609                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1610                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1611                               MBBVRegsMap, NewLIs, SSWeight);
1612  }
1613
1614  // Insert spills / restores if we are splitting.
1615  if (!TrySplit) {
1616    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1617    return NewLIs;
1618  }
1619
1620  SmallPtrSet<LiveInterval*, 4> AddedKill;
1621  SmallVector<unsigned, 2> Ops;
1622  if (NeedStackSlot) {
1623    int Id = SpillMBBs.find_first();
1624    while (Id != -1) {
1625      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1626      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1627      std::vector<SRInfo> &spills = SpillIdxes[Id];
1628      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1629        int index = spills[i].index;
1630        unsigned VReg = spills[i].vreg;
1631        LiveInterval &nI = getOrCreateInterval(VReg);
1632        bool isReMat = vrm.isReMaterialized(VReg);
1633        MachineInstr *MI = getInstructionFromIndex(index);
1634        bool CanFold = false;
1635        bool FoundUse = false;
1636        Ops.clear();
1637        if (spills[i].canFold) {
1638          CanFold = true;
1639          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1640            MachineOperand &MO = MI->getOperand(j);
1641            if (!MO.isRegister() || MO.getReg() != VReg)
1642              continue;
1643
1644            Ops.push_back(j);
1645            if (MO.isDef())
1646              continue;
1647            if (isReMat ||
1648                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1649                                                RestoreMBBs, RestoreIdxes))) {
1650              // MI has two-address uses of the same register. If the use
1651              // isn't the first and only use in the BB, then we can't fold
1652              // it. FIXME: Move this to rewriteInstructionsForSpills.
1653              CanFold = false;
1654              break;
1655            }
1656            FoundUse = true;
1657          }
1658        }
1659        // Fold the store into the def if possible.
1660        bool Folded = false;
1661        if (CanFold && !Ops.empty()) {
1662          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1663            Folded = true;
1664            if (FoundUse > 0) {
1665              // Also folded uses, do not issue a load.
1666              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1667              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1668            }
1669            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1670          }
1671        }
1672
1673        // Otherwise tell the spiller to issue a spill.
1674        if (!Folded) {
1675          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1676          bool isKill = LR->end == getStoreIndex(index);
1677          if (!MI->registerDefIsDead(nI.reg))
1678            // No need to spill a dead def.
1679            vrm.addSpillPoint(VReg, isKill, MI);
1680          if (isKill)
1681            AddedKill.insert(&nI);
1682        }
1683
1684        // Update spill slot weight.
1685        if (!isReMat)
1686          SSWeight += getSpillWeight(true, false, loopDepth);
1687      }
1688      Id = SpillMBBs.find_next(Id);
1689    }
1690  }
1691
1692  int Id = RestoreMBBs.find_first();
1693  while (Id != -1) {
1694    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1695    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1696
1697    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1698    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1699      int index = restores[i].index;
1700      if (index == -1)
1701        continue;
1702      unsigned VReg = restores[i].vreg;
1703      LiveInterval &nI = getOrCreateInterval(VReg);
1704      bool isReMat = vrm.isReMaterialized(VReg);
1705      MachineInstr *MI = getInstructionFromIndex(index);
1706      bool CanFold = false;
1707      Ops.clear();
1708      if (restores[i].canFold) {
1709        CanFold = true;
1710        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1711          MachineOperand &MO = MI->getOperand(j);
1712          if (!MO.isRegister() || MO.getReg() != VReg)
1713            continue;
1714
1715          if (MO.isDef()) {
1716            // If this restore were to be folded, it would have been folded
1717            // already.
1718            CanFold = false;
1719            break;
1720          }
1721          Ops.push_back(j);
1722        }
1723      }
1724
1725      // Fold the load into the use if possible.
1726      bool Folded = false;
1727      if (CanFold && !Ops.empty()) {
1728        if (!isReMat)
1729          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1730        else {
1731          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1732          int LdSlot = 0;
1733          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1734          // If the rematerializable def is a load, also try to fold it.
1735          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1736            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1737                                          Ops, isLoadSS, LdSlot, VReg);
1738          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1739          if (ImpUse) {
1740            // Re-matting an instruction with virtual register use. Add the
1741            // register as an implicit use on the use MI and update the register
1742            // interval's spill weight to HUGE_VALF to prevent it from being
1743            // spilled.
1744            LiveInterval &ImpLi = getInterval(ImpUse);
1745            ImpLi.weight = HUGE_VALF;
1746            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1747          }
1748        }
1749      }
1750      // If folding is not possible / failed, then tell the spiller to issue a
1751      // load / rematerialization for us.
1752      if (Folded)
1753        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1754      else
1755        vrm.addRestorePoint(VReg, MI);
1756
1757      // Update spill slot weight.
1758      if (!isReMat)
1759        SSWeight += getSpillWeight(false, true, loopDepth);
1760    }
1761    Id = RestoreMBBs.find_next(Id);
1762  }
1763
1764  // Finalize intervals: add kills, finalize spill weights, and filter out
1765  // dead intervals.
1766  std::vector<LiveInterval*> RetNewLIs;
1767  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1768    LiveInterval *LI = NewLIs[i];
1769    if (!LI->empty()) {
1770      LI->weight /= LI->getSize();
1771      if (!AddedKill.count(LI)) {
1772        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1773        unsigned LastUseIdx = getBaseIndex(LR->end);
1774        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1775        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1776        assert(UseIdx != -1);
1777        if (LastUse->getOperand(UseIdx).isImplicit() ||
1778            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1779          LastUse->getOperand(UseIdx).setIsKill();
1780          vrm.addKillPoint(LI->reg, LastUseIdx);
1781        }
1782      }
1783      RetNewLIs.push_back(LI);
1784    }
1785  }
1786
1787  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1788  return RetNewLIs;
1789}
1790
1791/// hasAllocatableSuperReg - Return true if the specified physical register has
1792/// any super register that's allocatable.
1793bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1794  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1795    if (allocatableRegs_[*AS] && hasInterval(*AS))
1796      return true;
1797  return false;
1798}
1799
1800/// getRepresentativeReg - Find the largest super register of the specified
1801/// physical register.
1802unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1803  // Find the largest super-register that is allocatable.
1804  unsigned BestReg = Reg;
1805  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1806    unsigned SuperReg = *AS;
1807    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1808      BestReg = SuperReg;
1809      break;
1810    }
1811  }
1812  return BestReg;
1813}
1814
1815/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1816/// specified interval that conflicts with the specified physical register.
1817unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1818                                                   unsigned PhysReg) const {
1819  unsigned NumConflicts = 0;
1820  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1821  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1822         E = mri_->reg_end(); I != E; ++I) {
1823    MachineOperand &O = I.getOperand();
1824    MachineInstr *MI = O.getParent();
1825    unsigned Index = getInstructionIndex(MI);
1826    if (pli.liveAt(Index))
1827      ++NumConflicts;
1828  }
1829  return NumConflicts;
1830}
1831
1832/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1833/// around all defs and uses of the specified interval.
1834void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1835                                            unsigned PhysReg, VirtRegMap &vrm) {
1836  unsigned SpillReg = getRepresentativeReg(PhysReg);
1837
1838  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1839    // If there are registers which alias PhysReg, but which are not a
1840    // sub-register of the chosen representative super register. Assert
1841    // since we can't handle it yet.
1842    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1843           tri_->isSuperRegister(*AS, SpillReg));
1844
1845  LiveInterval &pli = getInterval(SpillReg);
1846  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1847  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1848         E = mri_->reg_end(); I != E; ++I) {
1849    MachineOperand &O = I.getOperand();
1850    MachineInstr *MI = O.getParent();
1851    if (SeenMIs.count(MI))
1852      continue;
1853    SeenMIs.insert(MI);
1854    unsigned Index = getInstructionIndex(MI);
1855    if (pli.liveAt(Index)) {
1856      vrm.addEmergencySpill(SpillReg, MI);
1857      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1858      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1859        if (!hasInterval(*AS))
1860          continue;
1861        LiveInterval &spli = getInterval(*AS);
1862        if (spli.liveAt(Index))
1863          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1864      }
1865    }
1866  }
1867}
1868
1869LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1870                                                   MachineInstr* startInst) {
1871  LiveInterval& Interval = getOrCreateInterval(reg);
1872  VNInfo* VN = Interval.getNextValue(
1873            getInstructionIndex(startInst) + InstrSlots::DEF,
1874            startInst, getVNInfoAllocator());
1875  VN->hasPHIKill = true;
1876  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1877  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1878               getMBBEndIdx(startInst->getParent()) + 1, VN);
1879  Interval.addRange(LR);
1880
1881  return LR;
1882}
1883