LiveIntervalAnalysis.cpp revision 79a796c2b17c7ebd4dd489b5de1078fc2148adba
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
// Hidden options for help debugging.
// -disable-rematerialization: turn off remat entirely (isReMaterializable
// bails out immediately when set).
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// -split-intervals-at-bb / -split-limit: control interval splitting
// (consumed elsewhere in this pass).
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Register this analysis with the pass registry under -liveintervals.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
55
/// getAnalysisUsage - Declare prerequisites and preserved analyses.  We need
/// LiveVariables (and keep it up to date), and require that PHI elimination
/// and two-address lowering have already run on the function.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
66
67void LiveIntervals::releaseMemory() {
68  MBB2IdxMap.clear();
69  Idx2MBBMap.clear();
70  mi2iMap_.clear();
71  i2miMap_.clear();
72  r2iMap_.clear();
73  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
74  VNInfoAllocator.Reset();
75  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
76    mf_->DeleteMachineInstr(ClonedMIs[i]);
77}
78
/// computeNumbering - Assign a dense slot index to every instruction in the
/// function (InstrSlots::NUM slots per instruction) and rebuild the
/// instruction<->index and MBB<->index maps.  If a numbering already
/// existed, remap every live range and value number from the old numbering
/// into the new one, guessing sensibly for erased instructions.
void LiveIntervals::computeNumbering() {
  // Keep the previous index->instruction map so existing intervals can be
  // translated into the new numbering below.
  Index2MiMap OldI2MI = i2miMap_;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    if (StartIdx == MIIndex) {
      // Empty MBB: still burn one instruction's worth of slots (with a null
      // i2mi entry) so the block gets a non-empty [start,end) index span.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }
    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // If this is a renumbering (not the first numbering), walk every live
  // range of every interval and translate its indices.
  if (!OldI2MI.empty())
    for (iterator I = begin(), E = end(); I != E; ++I)
      for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
           LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned offset = LI->start % InstrSlots::NUM;
        if (OldI2MI[LI->start / InstrSlots::NUM])
          LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
        else {
          // The defining instruction was erased: scan forward for the next
          // surviving instruction.
          // NOTE(review): assumes some later slot still maps to an
          // instruction; if the tail of the function were erased this scan
          // would run off the end of OldI2MI -- confirm callers guarantee
          // a surviving successor.
          unsigned i = 0;
          MachineInstr* newInstr = 0;
          do {
            newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
            i++;
          } while (!newInstr);

          // If the surviving instruction starts its block, the range cannot
          // begin before the block boundary; otherwise anchor the start in
          // the slot just before the surviving instruction.
          if (mi2iMap_[newInstr] ==
              MBB2IdxMap[newInstr->getParent()->getNumber()].first)
            LI->start = mi2iMap_[newInstr];
          else
            LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
          offset = LI->end % InstrSlots::NUM;
          if (OldI2MI[LI->end / InstrSlots::NUM])
            LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
          else {
            unsigned i = 0;
            MachineInstr* newInstr = 0;
            do {
              newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
              i++;
            } while (!newInstr);

            LI->end = mi2iMap_[newInstr];
          }
        } else {
          // Old end was past the last numbered slot; clamp to the new end
          // of the function.
          LI->end = i2miMap_.size() * InstrSlots::NUM;
        }

        // Remap the VNInfo def index, which works the same as the
        // start indices above.
        // NOTE(review): the valno is remapped once per live range; a VNInfo
        // shared by several ranges of this interval would be visited more
        // than once, and the second visit would re-translate an
        // already-translated index.  Verify each valno is reached by only
        // one range here, or guard against double remapping.
        VNInfo* vni = LI->valno;
        offset = vni->def % InstrSlots::NUM;
        if (OldI2MI[vni->def / InstrSlots::NUM])
          vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
        else {
          // Defining instruction erased: same forward scan as for starts.
          unsigned i = 0;
          MachineInstr* newInstr = 0;
          do {
            newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
            i++;
          } while (!newInstr);

          if (mi2iMap_[newInstr] ==
              MBB2IdxMap[newInstr->getParent()->getNumber()].first)
            vni->def = mi2iMap_[newInstr];
          else
            vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          offset = vni->kills[i] % InstrSlots::NUM;
          if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
            vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
                            offset;
          else {
            unsigned e = 0;
            MachineInstr* newInstr = 0;
            do {
              newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
              e++;
            } while (!newInstr);

            vni->kills[i] = mi2iMap_[newInstr];
          }
        }
      }
}
204
/// runOnMachineFunction - Compute live intervals for the function.  (Despite
/// the historical comment, no register allocation happens here: we number
/// the instructions and build one LiveInterval per register.)
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache per-function state used by the rest of the pass.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  // NOTE(review): this statistic is bumped here, before any coalescing has
  // run, so it currently equals numIntervals -- confirm whether the
  // coalescer updates it later or the name is stale.
  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
231
232/// print - Implement the dump method.
233void LiveIntervals::print(std::ostream &O, const Module* ) const {
234  O << "********** INTERVALS **********\n";
235  for (const_iterator I = begin(), E = end(); I != E; ++I) {
236    I->second.print(O, tri_);
237    O << "\n";
238  }
239
240  O << "********** MACHINEINSTRS **********\n";
241  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
242       mbbi != mbbe; ++mbbi) {
243    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
244    for (MachineBasicBlock::iterator mii = mbbi->begin(),
245           mie = mbbi->end(); mii != mie; ++mii) {
246      O << getInstructionIndex(mii) << '\t' << *mii;
247    }
248  }
249}
250
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.  Walks every
/// instruction slot covered by li's ranges and checks each instruction's
/// register operands (mapping virtual registers through the VirtRegMap)
/// for overlap with 'reg'.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Step one instruction slot at a time across the range's base indices.
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      // Copies into or out of li.reg are not conflicts.
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          // Translate a virtual register to its assigned physreg, if any.
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // NOTE(review): this flags any operand overlapping 'reg', uses as
        // well as defs, despite the function's name -- confirm intended.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
290
291void LiveIntervals::printRegName(unsigned reg) const {
292  if (TargetRegisterInfo::isPhysicalRegister(reg))
293    cerr << tri_->getName(reg);
294  else
295    cerr << "%reg" << reg;
296}
297
/// handleVirtualRegisterDef - Extend 'interval' for a definition of a
/// virtual register by instruction 'mi' (operand MOIdx) at index MIIdx.
/// The first def of a vreg builds the whole interval from LiveVariables
/// information; later defs (from two-address or PHI elimination) patch the
/// existing interval with extra value numbers.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // IMPLICIT_DEF contributes no real live range.
  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record copy-like instructions on the value number so coalescing can
    // find the copy source later.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;  // defined and killed by the same instruction

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      // The range reaching the slot just before the redef carries the value
      // that flows into the two-address use.
      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // Same copy-recording as the first-def case above.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
491
/// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
/// register at MIIdx.  Scans forward in the block for the kill or redef
/// that ends the range; CopyMI, if non-null, is recorded on a newly created
/// value number as the defining copy.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
553
554void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
555                                      MachineBasicBlock::iterator MI,
556                                      unsigned MIIdx,
557                                      MachineOperand& MO,
558                                      unsigned MOIdx) {
559  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
560    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
561                             getOrCreateInterval(MO.getReg()));
562  else if (allocatableRegs_[MO.getReg()]) {
563    MachineInstr *CopyMI = NULL;
564    unsigned SrcReg, DstReg;
565    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
566        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
567        tii_->isMoveInstr(*MI, SrcReg, DstReg))
568      CopyMI = MI;
569    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
570                              getOrCreateInterval(MO.getReg()), CopyMI);
571    // Def of a register also defines its sub-registers.
572    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
573      // If MI also modifies the sub-register explicitly, avoid processing it
574      // more than once. Do not pass in TRI here so it checks for exact match.
575      if (!MI->modifiesRegister(*AS))
576        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
577                                  getOrCreateInterval(*AS), 0);
578  }
579}
580
/// handleLiveInRegister - Create the portion of a live-in register's
/// interval covered by MBB: from block entry (MIIdx) to its kill, its
/// first redefinition, or through the end of the block.  isAlias marks
/// registers processed only because they alias a real live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.  (end still equals the
  // block-entry index when neither branch above fired.)
  if (end == MIIdx) {
    if (isAlias) {
      // Alias of another live-in: consider it dead at block entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Neither killed nor redefined: live through the whole block.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
628
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.  The index arithmetic here must stay in
/// lock-step with computeNumbering(): InstrSlots::NUM slots per
/// instruction, and one extra slot burned for an empty block.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Sub-registers of a live-in are live-in too, unless an interval for
      // them was already created above.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // Walk operands in reverse so implicit defs (appended last) are
      // processed before explicit ones.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;
    }

    // Mirror computeNumbering(): an empty block still consumes one slot.
    if (MBB->begin() == miEnd) MIIndex += InstrSlots::NUM; // Empty MBB
  }
}
674
675bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
676                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
677  std::vector<IdxMBBPair>::const_iterator I =
678    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
679
680  bool ResVal = false;
681  while (I != Idx2MBBMap.end()) {
682    if (LR.end <= I->first)
683      break;
684    MBBs.push_back(I->second);
685    ResVal = true;
686    ++I;
687  }
688  return ResVal;
689}
690
691
692LiveInterval LiveIntervals::createInterval(unsigned reg) {
693  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
694                       HUGE_VALF : 0.0F;
695  return LiveInterval(reg, Weight);
696}
697
698/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
699/// copy field and returns the source register that defines it.
700unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
701  if (!VNI->copy)
702    return 0;
703
704  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
705    return VNI->copy->getOperand(1).getReg();
706  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
707    return VNI->copy->getOperand(2).getReg();
708  unsigned SrcReg, DstReg;
709  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
710    return SrcReg;
711  assert(0 && "Unrecognized copy instruction!");
712  return 0;
713}
714
715//===----------------------------------------------------------------------===//
716// Register allocator hooks.
717//
718
719/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
720/// allow one) virtual register operand, then its uses are implicitly using
721/// the register. Returns the virtual register.
722unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
723                                            MachineInstr *MI) const {
724  unsigned RegOp = 0;
725  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
726    MachineOperand &MO = MI->getOperand(i);
727    if (!MO.isRegister() || !MO.isUse())
728      continue;
729    unsigned Reg = MO.getReg();
730    if (Reg == 0 || Reg == li.reg)
731      continue;
732    // FIXME: For now, only remat MI with at most one register operand.
733    assert(!RegOp &&
734           "Can't rematerialize instruction with multiple register operand!");
735    RegOp = MO.getReg();
736    break;
737  }
738  return RegOp;
739}
740
741/// isValNoAvailableAt - Return true if the val# of the specified interval
742/// which reaches the given instruction also reaches the specified use index.
743bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
744                                       unsigned UseIdx) const {
745  unsigned Index = getInstructionIndex(MI);
746  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
747  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
748  return UI != li.end() && UI->valno == ValNo;
749}
750
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  Sets isLoad when
/// the remat'able def is a load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Command-line escape hatch: -disable-rematerialization.
  if (DisableReMat)
    return false;

  isLoad = false;
  // IMPLICIT_DEF is trivially rematerializable.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    // If the def implicitly uses another virtual register, that register's
    // reaching value must be available at every use of li that this val#
    // covers, otherwise rematerializing there would read the wrong value.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this val# matter.
        // NOTE(review): FindLiveRangeContaining's result is dereferenced
        // without an end() check here -- confirm every use of li.reg is
        // inside some range of li.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
794
795/// isReMaterializable - Returns true if every definition of MI of every
796/// val# of the specified interval is re-materializable.
797bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
798  isLoad = false;
799  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
800       i != e; ++i) {
801    const VNInfo *VNI = *i;
802    unsigned DefIdx = VNI->def;
803    if (DefIdx == ~1U)
804      continue; // Dead val#.
805    // Is the def for the val# rematerializable?
806    if (DefIdx == ~0u)
807      return false;
808    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
809    bool DefIsLoad = false;
810    if (!ReMatDefMI ||
811        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
812      return false;
813    isLoad |= DefIsLoad;
814  }
815  return true;
816}
817
818/// FilterFoldedOps - Filter out two-address use operands. Return
819/// true if it finds any issue with the operands that ought to prevent
820/// folding.
821static bool FilterFoldedOps(MachineInstr *MI,
822                            SmallVector<unsigned, 2> &Ops,
823                            unsigned &MRInfo,
824                            SmallVector<unsigned, 2> &FoldOps) {
825  const TargetInstrDesc &TID = MI->getDesc();
826
827  MRInfo = 0;
828  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
829    unsigned OpIdx = Ops[i];
830    MachineOperand &MO = MI->getOperand(OpIdx);
831    // FIXME: fold subreg use.
832    if (MO.getSubReg())
833      return true;
834    if (MO.isDef())
835      MRInfo |= (unsigned)VirtRegMap::isMod;
836    else {
837      // Filter out two-address use operand(s).
838      if (!MO.isImplicit() &&
839          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
840        MRInfo = VirtRegMap::isModRef;
841        continue;
842      }
843      MRInfo |= (unsigned)VirtRegMap::isRef;
844    }
845    FoldOps.push_back(OpIdx);
846  }
847  return false;
848}
849
850
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
///
/// When isSS is true the stack slot access 'Slot' is folded; otherwise the
/// rematerialized def DefMI is folded. On success MI is replaced in its
/// basic block and in the instruction index maps, and the reference
/// parameter MI is updated to point at the replacement.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot access or the remat'ed
  // def into MI, producing a replacement instruction (or null on failure).
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Move all of vrm's spill / restore / emergency-spill records from the
    // old instruction onto the replacement.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Transfer MI's slot index to the replacement in both index maps.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    // Splice the new instruction into MI's position and hand it back to the
    // caller via the reference parameter.
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
904
905/// canFoldMemoryOperand - Returns true if the specified load / store
906/// folding is possible.
907bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
908                                         SmallVector<unsigned, 2> &Ops,
909                                         bool ReMat) const {
910  // Filter the list of operand indexes that are to be folded. Abort if
911  // any operand will prevent folding.
912  unsigned MRInfo = 0;
913  SmallVector<unsigned, 2> FoldOps;
914  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
915    return false;
916
917  // It's only legal to remat for a use, not a def.
918  if (ReMat && (MRInfo & VirtRegMap::isMod))
919    return false;
920
921  return tii_->canFoldMemoryOperand(MI, FoldOps);
922}
923
924bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
925  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
926  for (LiveInterval::Ranges::const_iterator
927         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
928    std::vector<IdxMBBPair>::const_iterator II =
929      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
930    if (II == Idx2MBBMap.end())
931      continue;
932    if (I->end > II->first)  // crossing a MBB.
933      return false;
934    MBBs.insert(II->second);
935    if (MBBs.size() > 1)
936      return false;
937  }
938  return true;
939}
940
941/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
942/// interval on to-be re-materialized operands of MI) with new register.
943void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
944                                       MachineInstr *MI, unsigned NewVReg,
945                                       VirtRegMap &vrm) {
946  // There is an implicit use. That means one of the other operand is
947  // being remat'ed and the remat'ed instruction has li.reg as an
948  // use operand. Make sure we rewrite that as well.
949  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
950    MachineOperand &MO = MI->getOperand(i);
951    if (!MO.isRegister())
952      continue;
953    unsigned Reg = MO.getReg();
954    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
955      continue;
956    if (!vrm.isReMaterialized(Reg))
957      continue;
958    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
959    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
960    if (UseMO)
961      UseMO->setReg(NewVReg);
962  }
963}
964
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites every operand of MI referring to li.reg to a new virtual
/// register (created here if NewVReg is 0), attempting to fold the stack
/// slot access or the remat'ed def into MI where legal, and records the
/// spill / remat bookkeeping in vrm. HasDef / HasUse are set to whether MI
/// defines and/or uses the register. Returns true only if every operand
/// considered could be folded; the caller uses that to lower spill weight.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
  // Folding may replace MI wholesale; when that happens we jump back here
  // and rescan the (new) instruction's operands from the start.
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    // RegI preserves the operand's original register number so later
    // operands can still be matched after mop is rewritten below.
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    // Collect any later operands that reference the same register.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easier see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
      if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            // Folding deleted the instruction entirely; undo the weight
            // contribution recorded above.
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // New interval: the range covers from the reload point to just past
        // this use.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def's range extends from the def slot to the store slot.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1179bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1180                                   const VNInfo *VNI,
1181                                   MachineBasicBlock *MBB, unsigned Idx) const {
1182  unsigned End = getMBBEndIdx(MBB);
1183  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1184    unsigned KillIdx = VNI->kills[j];
1185    if (KillIdx > Idx && KillIdx < End)
1186      return true;
1187  }
1188  return false;
1189}
1190
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;    // Instruction index of MI.
    MachineInstr *MI;  // Instruction with an operand of the spilled register.
    bool HasUse;       // The operand is a use.
    bool HasDef;       // The operand is a def.
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  // RewriteInfoCompare - Orders RewriteInfo entries by ascending
  // instruction index, so rewrites proceed in program order.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1209
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range I, collecting them in instruction-index order and
/// delegating each instruction to rewriteInstructionForSpills. Also records
/// per-MBB spill points (SpillMBBs / SpillIdxes) and restore points
/// (RestoreMBBs / RestoreIdxes) for the caller to emit later when splitting.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before inspecting; rewriting can invalidate the position.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easier see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries. Merge them into one record for this MI.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    // When splitting at basic block boundaries, reuse the vreg already
    // chosen for this MBB (if any) so the whole block shares one interval.
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // The value lives out of the block, so a spill is needed; record
          // (or update) the spill point for this vreg in this MBB.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1423
1424bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1425                        BitVector &RestoreMBBs,
1426                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1427  if (!RestoreMBBs[Id])
1428    return false;
1429  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1430  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1431    if (Restores[i].index == index &&
1432        Restores[i].vreg == vr &&
1433        Restores[i].canFold)
1434      return true;
1435  return false;
1436}
1437
1438void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1439                        BitVector &RestoreMBBs,
1440                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1441  if (!RestoreMBBs[Id])
1442    return;
1443  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1444  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1445    if (Restores[i].index == index && Restores[i].vreg)
1446      Restores[i].index = -1;
1447}
1448
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance before modifying; erasing MI or rewriting the operand below
    // would invalidate the current iterator position.
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that referenced li.reg to the new vreg.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}
1482
1483
1484std::vector<LiveInterval*> LiveIntervals::
1485addIntervalsForSpills(const LiveInterval &li,
1486                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1487                      float &SSWeight) {
1488  assert(li.weight != HUGE_VALF &&
1489         "attempt to spill already spilled interval!");
1490
1491  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1492  li.print(DOUT, tri_);
1493  DOUT << '\n';
1494
1495  // Spill slot weight.
1496  SSWeight = 0.0f;
1497
1498  // Each bit specify whether it a spill is required in the MBB.
1499  BitVector SpillMBBs(mf_->getNumBlockIDs());
1500  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1501  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1502  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1503  std::map<unsigned,unsigned> MBBVRegsMap;
1504  std::vector<LiveInterval*> NewLIs;
1505  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1506
1507  unsigned NumValNums = li.getNumValNums();
1508  SmallVector<MachineInstr*, 4> ReMatDefs;
1509  ReMatDefs.resize(NumValNums, NULL);
1510  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1511  ReMatOrigDefs.resize(NumValNums, NULL);
1512  SmallVector<int, 4> ReMatIds;
1513  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1514  BitVector ReMatDelete(NumValNums);
1515  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1516
1517  // Spilling a split live interval. It cannot be split any further. Also,
1518  // it's also guaranteed to be a single val# / range interval.
1519  if (vrm.getPreSplitReg(li.reg)) {
1520    vrm.setIsSplitFromReg(li.reg, 0);
1521    // Unset the split kill marker on the last use.
1522    unsigned KillIdx = vrm.getKillPoint(li.reg);
1523    if (KillIdx) {
1524      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1525      assert(KillMI && "Last use disappeared?");
1526      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1527      assert(KillOp != -1 && "Last use disappeared?");
1528      KillMI->getOperand(KillOp).setIsKill(false);
1529    }
1530    vrm.removeKillPoint(li.reg);
1531    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1532    Slot = vrm.getStackSlot(li.reg);
1533    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1534    MachineInstr *ReMatDefMI = DefIsReMat ?
1535      vrm.getReMaterializedMI(li.reg) : NULL;
1536    int LdSlot = 0;
1537    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1538    bool isLoad = isLoadSS ||
1539      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1540    bool IsFirstRange = true;
1541    for (LiveInterval::Ranges::const_iterator
1542           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1543      // If this is a split live interval with multiple ranges, it means there
1544      // are two-address instructions that re-defined the value. Only the
1545      // first def can be rematerialized!
1546      if (IsFirstRange) {
1547        // Note ReMatOrigDefMI has already been deleted.
1548        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1549                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1550                             false, vrm, rc, ReMatIds, loopInfo,
1551                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1552                             MBBVRegsMap, NewLIs, SSWeight);
1553      } else {
1554        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1555                             Slot, 0, false, false, false,
1556                             false, vrm, rc, ReMatIds, loopInfo,
1557                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1558                             MBBVRegsMap, NewLIs, SSWeight);
1559      }
1560      IsFirstRange = false;
1561    }
1562
1563    SSWeight = 0.0f;  // Already accounted for when split.
1564    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1565    return NewLIs;
1566  }
1567
1568  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1569  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1570    TrySplit = false;
1571  if (TrySplit)
1572    ++numSplits;
1573  bool NeedStackSlot = false;
1574  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1575       i != e; ++i) {
1576    const VNInfo *VNI = *i;
1577    unsigned VN = VNI->id;
1578    unsigned DefIdx = VNI->def;
1579    if (DefIdx == ~1U)
1580      continue; // Dead val#.
1581    // Is the def for the val# rematerializable?
1582    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1583      ? 0 : getInstructionFromIndex(DefIdx);
1584    bool dummy;
1585    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1586      // Remember how to remat the def of this val#.
1587      ReMatOrigDefs[VN] = ReMatDefMI;
1588      // Original def may be modified so we have to make a copy here. vrm must
1589      // delete these!
1590      ReMatDefs[VN] = ReMatDefMI = mf_->CloneMachineInstr(ReMatDefMI);
1591
1592      bool CanDelete = true;
1593      if (VNI->hasPHIKill) {
1594        // A kill is a phi node, not all of its uses can be rematerialized.
1595        // It must not be deleted.
1596        CanDelete = false;
1597        // Need a stack slot if there is any live range where uses cannot be
1598        // rematerialized.
1599        NeedStackSlot = true;
1600      }
1601      if (CanDelete)
1602        ReMatDelete.set(VN);
1603    } else {
1604      // Need a stack slot if there is any live range where uses cannot be
1605      // rematerialized.
1606      NeedStackSlot = true;
1607    }
1608  }
1609
1610  // One stack slot per live interval.
1611  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1612    Slot = vrm.assignVirt2StackSlot(li.reg);
1613
1614  // Create new intervals and rewrite defs and uses.
1615  for (LiveInterval::Ranges::const_iterator
1616         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1617    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1618    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1619    bool DefIsReMat = ReMatDefMI != NULL;
1620    bool CanDelete = ReMatDelete[I->valno->id];
1621    int LdSlot = 0;
1622    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1623    bool isLoad = isLoadSS ||
1624      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1625    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1626                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1627                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1628                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1629                               MBBVRegsMap, NewLIs, SSWeight);
1630  }
1631
1632  // Insert spills / restores if we are splitting.
1633  if (!TrySplit) {
1634    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1635    return NewLIs;
1636  }
1637
1638  SmallPtrSet<LiveInterval*, 4> AddedKill;
1639  SmallVector<unsigned, 2> Ops;
1640  if (NeedStackSlot) {
1641    int Id = SpillMBBs.find_first();
1642    while (Id != -1) {
1643      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1644      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1645      std::vector<SRInfo> &spills = SpillIdxes[Id];
1646      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1647        int index = spills[i].index;
1648        unsigned VReg = spills[i].vreg;
1649        LiveInterval &nI = getOrCreateInterval(VReg);
1650        bool isReMat = vrm.isReMaterialized(VReg);
1651        MachineInstr *MI = getInstructionFromIndex(index);
1652        bool CanFold = false;
1653        bool FoundUse = false;
1654        Ops.clear();
1655        if (spills[i].canFold) {
1656          CanFold = true;
1657          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1658            MachineOperand &MO = MI->getOperand(j);
1659            if (!MO.isRegister() || MO.getReg() != VReg)
1660              continue;
1661
1662            Ops.push_back(j);
1663            if (MO.isDef())
1664              continue;
1665            if (isReMat ||
1666                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1667                                                RestoreMBBs, RestoreIdxes))) {
1668              // MI has two-address uses of the same register. If the use
1669              // isn't the first and only use in the BB, then we can't fold
1670              // it. FIXME: Move this to rewriteInstructionsForSpills.
1671              CanFold = false;
1672              break;
1673            }
1674            FoundUse = true;
1675          }
1676        }
1677        // Fold the store into the def if possible.
1678        bool Folded = false;
1679        if (CanFold && !Ops.empty()) {
1680          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1681            Folded = true;
1682            if (FoundUse > 0) {
1683              // Also folded uses, do not issue a load.
1684              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1685              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1686            }
1687            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1688          }
1689        }
1690
1691        // Otherwise tell the spiller to issue a spill.
1692        if (!Folded) {
1693          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1694          bool isKill = LR->end == getStoreIndex(index);
1695          if (!MI->registerDefIsDead(nI.reg))
1696            // No need to spill a dead def.
1697            vrm.addSpillPoint(VReg, isKill, MI);
1698          if (isKill)
1699            AddedKill.insert(&nI);
1700        }
1701
1702        // Update spill slot weight.
1703        if (!isReMat)
1704          SSWeight += getSpillWeight(true, false, loopDepth);
1705      }
1706      Id = SpillMBBs.find_next(Id);
1707    }
1708  }
1709
1710  int Id = RestoreMBBs.find_first();
1711  while (Id != -1) {
1712    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1713    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1714
1715    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1716    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1717      int index = restores[i].index;
1718      if (index == -1)
1719        continue;
1720      unsigned VReg = restores[i].vreg;
1721      LiveInterval &nI = getOrCreateInterval(VReg);
1722      bool isReMat = vrm.isReMaterialized(VReg);
1723      MachineInstr *MI = getInstructionFromIndex(index);
1724      bool CanFold = false;
1725      Ops.clear();
1726      if (restores[i].canFold) {
1727        CanFold = true;
1728        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1729          MachineOperand &MO = MI->getOperand(j);
1730          if (!MO.isRegister() || MO.getReg() != VReg)
1731            continue;
1732
1733          if (MO.isDef()) {
1734            // If this restore were to be folded, it would have been folded
1735            // already.
1736            CanFold = false;
1737            break;
1738          }
1739          Ops.push_back(j);
1740        }
1741      }
1742
1743      // Fold the load into the use if possible.
1744      bool Folded = false;
1745      if (CanFold && !Ops.empty()) {
1746        if (!isReMat)
1747          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1748        else {
1749          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1750          int LdSlot = 0;
1751          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1752          // If the rematerializable def is a load, also try to fold it.
1753          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1754            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1755                                          Ops, isLoadSS, LdSlot, VReg);
1756          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1757          if (ImpUse) {
1758            // Re-matting an instruction with virtual register use. Add the
1759            // register as an implicit use on the use MI and update the register
1760            // interval's spill weight to HUGE_VALF to prevent it from being
1761            // spilled.
1762            LiveInterval &ImpLi = getInterval(ImpUse);
1763            ImpLi.weight = HUGE_VALF;
1764            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1765          }
1766        }
1767      }
1768      // If folding is not possible / failed, then tell the spiller to issue a
1769      // load / rematerialization for us.
1770      if (Folded)
1771        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1772      else
1773        vrm.addRestorePoint(VReg, MI);
1774
1775      // Update spill slot weight.
1776      if (!isReMat)
1777        SSWeight += getSpillWeight(false, true, loopDepth);
1778    }
1779    Id = RestoreMBBs.find_next(Id);
1780  }
1781
1782  // Finalize intervals: add kills, finalize spill weights, and filter out
1783  // dead intervals.
1784  std::vector<LiveInterval*> RetNewLIs;
1785  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1786    LiveInterval *LI = NewLIs[i];
1787    if (!LI->empty()) {
1788      LI->weight /= LI->getSize();
1789      if (!AddedKill.count(LI)) {
1790        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1791        unsigned LastUseIdx = getBaseIndex(LR->end);
1792        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1793        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1794        assert(UseIdx != -1);
1795        if (LastUse->getOperand(UseIdx).isImplicit() ||
1796            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1797          LastUse->getOperand(UseIdx).setIsKill();
1798          vrm.addKillPoint(LI->reg, LastUseIdx);
1799        }
1800      }
1801      RetNewLIs.push_back(LI);
1802    }
1803  }
1804
1805  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1806  return RetNewLIs;
1807}
1808
1809/// hasAllocatableSuperReg - Return true if the specified physical register has
1810/// any super register that's allocatable.
1811bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1812  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1813    if (allocatableRegs_[*AS] && hasInterval(*AS))
1814      return true;
1815  return false;
1816}
1817
1818/// getRepresentativeReg - Find the largest super register of the specified
1819/// physical register.
1820unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1821  // Find the largest super-register that is allocatable.
1822  unsigned BestReg = Reg;
1823  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1824    unsigned SuperReg = *AS;
1825    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1826      BestReg = SuperReg;
1827      break;
1828    }
1829  }
1830  return BestReg;
1831}
1832
1833/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1834/// specified interval that conflicts with the specified physical register.
1835unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1836                                                   unsigned PhysReg) const {
1837  unsigned NumConflicts = 0;
1838  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1839  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1840         E = mri_->reg_end(); I != E; ++I) {
1841    MachineOperand &O = I.getOperand();
1842    MachineInstr *MI = O.getParent();
1843    unsigned Index = getInstructionIndex(MI);
1844    if (pli.liveAt(Index))
1845      ++NumConflicts;
1846  }
1847  return NumConflicts;
1848}
1849
1850/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1851/// around all defs and uses of the specified interval.
1852void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1853                                            unsigned PhysReg, VirtRegMap &vrm) {
1854  unsigned SpillReg = getRepresentativeReg(PhysReg);
1855
1856  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1857    // If there are registers which alias PhysReg, but which are not a
1858    // sub-register of the chosen representative super register. Assert
1859    // since we can't handle it yet.
1860    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1861           tri_->isSuperRegister(*AS, SpillReg));
1862
1863  LiveInterval &pli = getInterval(SpillReg);
1864  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1865  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1866         E = mri_->reg_end(); I != E; ++I) {
1867    MachineOperand &O = I.getOperand();
1868    MachineInstr *MI = O.getParent();
1869    if (SeenMIs.count(MI))
1870      continue;
1871    SeenMIs.insert(MI);
1872    unsigned Index = getInstructionIndex(MI);
1873    if (pli.liveAt(Index)) {
1874      vrm.addEmergencySpill(SpillReg, MI);
1875      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1876      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1877        if (!hasInterval(*AS))
1878          continue;
1879        LiveInterval &spli = getInterval(*AS);
1880        if (spli.liveAt(Index))
1881          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1882      }
1883    }
1884  }
1885}
1886
1887LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1888                                                   MachineInstr* startInst) {
1889  LiveInterval& Interval = getOrCreateInterval(reg);
1890  VNInfo* VN = Interval.getNextValue(
1891            getInstructionIndex(startInst) + InstrSlots::DEF,
1892            startInst, getVNInfoAllocator());
1893  VN->hasPHIKill = true;
1894  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1895  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1896               getMBBEndIdx(startInst->getParent()) + 1, VN);
1897  Interval.addRange(LR);
1898
1899  return LR;
1900}
1901