LiveIntervalAnalysis.cpp revision d7dcbecdbcf5d7a1efc5ed65ddcc26bb8c20c1e6
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
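  // A sketch of the slot layout (assuming the usual InstrSlots encoding of
  // LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4): the instruction with
  // base index I owns the four slots [I, I+4), so an instruction numbered 8
  // takes reloads at slot 8, uses at 9, defs at 10 and spills at 11. The
  // empty slots inserted below leave room for instructions created later.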

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
      for (LiveInterval::iterator LI = OI->second.begin(),
           LE = OI->second.end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
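        // Worked example (a sketch, using the slot encoding above): an old
        // start of 33 has offset 1, a USE slot, so it is remapped to
        // mi2iMap_[OldI2MI[8]] + 1; a start whose offset is LOAD sat on a
        // block boundary and snaps to the new start of its block instead.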
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    ((I != OldI2MBB.end() && I->first > index) ||
                    (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::USE) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    ((I != OldI2MBB.end() && I->first > index) ||
                    (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

          LI->end = getMBBEndIdx(J->second) + 1;
        } else {
          unsigned idx = index;
          while (!OldI2MI[index]) ++index;
          LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
        }

        // Remap the VNInfo def index, which works the same as the
        // start indices above.
        VNInfo* vni = LI->valno;
        index = vni->def / InstrSlots::NUM;
        offset = vni->def % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    ((I != OldI2MBB.end() && I->first > index) ||
                    (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

          vni->def = getMBBStartIdx(J->second);

        } else {
          vni->def = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          index = (vni->kills[i]-1) / InstrSlots::NUM;
          offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::USE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                      ((I != OldI2MBB.end() && I->first > index) ||
                      (I == OldI2MBB.end() && OldI2MBB.size()>0)) ? (I-1): I;

            vni->kills[i] = getMBBEndIdx(J->second) + 1;
          } else {
            unsigned idx = index;
            while (!OldI2MI[index]) ++index;
            vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                            (idx == index ? offset : 0);
          }
        }
      }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
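    // For example (a sketch with made-up indices):
    //   4   %reg1024 = MOV32ri 7       ; def at defSlot(4) = 6
    //   8   %EAX = MOV32rr %reg1024    ; kill at useSlot(8)+1 = 10
    // produces the intra-block range [6, 10) built just below.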
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
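      // Sketch of the split (hypothetical indices): given
      //   4  %reg1026 = MOV %reg1025       ; DefIndex = 6
      //   8  %reg1026 = ADD %reg1026, 1    ; RedefIndex = 10
      // the old range [6, 10) is re-assigned to a new value number defined
      // by the copy, while value #0 is moved to be defined at index 10.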
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
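  // For instance (a sketch): a def with base index 4 that is never read
  // yields the unit range [6, 7), since defSlot(4) = 4 + InstrSlots::DEF.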
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }
  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
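/// For example (a sketch): if the remat candidate is
///   %reg1024 = LEA32r %reg1025, 1, %reg0, 4
/// then every rematerialized copy of %reg1024 implicitly reads %reg1025,
/// and that is the register this helper returns.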
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (alist<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the remat
          // mechanisms support currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from a
/// stack slot or a rematerialized load into the given operands of the
/// specified MI. If successful, MI is updated with the newly created MI
/// and this returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
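    // Concretely (a sketch): for a two-address instruction such as
    //   %reg1024 = ADD32rr %reg1024, %reg1025
    // both the def and the tied use of %reg1024 must end up on the same
    // NewVReg, which is why Ops below collects every operand index that
    // refers to li.reg.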

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
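    // Note: getSpillWeight is assumed here to scale with loop depth,
    // roughly (HasDef + HasUse) * 10^loopDepth, so defs and uses inside
    // deep loops dominate SSWeight.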

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();
1379
1380    if (ImpUse && MI != ReMatDefMI) {
1381      // Re-matting an instruction with virtual register use. Update the
1382      // register interval's spill weight to HUGE_VALF to prevent it from
1383      // being spilled.
1384      LiveInterval &ImpLi = getInterval(ImpUse);
1385      ImpLi.weight = HUGE_VALF;
1386    }
1387
1388    unsigned MBBId = MBB->getNumber();
1389    unsigned ThisVReg = 0;
1390    if (TrySplit) {
1391      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1392      if (NVI != MBBVRegsMap.end()) {
1393        ThisVReg = NVI->second;
1394        // One common case:
1395        // x = use
1396        // ...
1397        // ...
1398        // def = ...
1399        //     = use
1400        // It's better to start a new interval to avoid artificially
1401        // extending the new interval.
1402        if (MIHasDef && !MIHasUse) {
1403          MBBVRegsMap.erase(MBB->getNumber());
1404          ThisVReg = 0;
1405        }
1406      }
1407    }
1408
1409    bool IsNew = ThisVReg == 0;
1410    if (IsNew) {
1411      // This ends the previous live interval. If all of its defs / uses
1412      // can be folded, give it a low spill weight.
1413      if (NewVReg && TrySplit && AllCanFold) {
1414        LiveInterval &nI = getOrCreateInterval(NewVReg);
1415        nI.weight /= 10.0F;
1416      }
1417      AllCanFold = true;
1418    }
1419    NewVReg = ThisVReg;
1420
1421    bool HasDef = false;
1422    bool HasUse = false;
1423    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1424                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1425                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1426                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1427                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1428    if (!HasDef && !HasUse)
1429      continue;
1430
1431    AllCanFold &= CanFold;
1432
1433    // Update weight of spill interval.
1434    LiveInterval &nI = getOrCreateInterval(NewVReg);
1435    if (!TrySplit) {
1436      // The spill weight is now infinity as it cannot be spilled again.
1437      nI.weight = HUGE_VALF;
1438      continue;
1439    }
1440
1441    // Keep track of the last def and first use in each MBB.
1442    if (HasDef) {
1443      if (MI != ReMatOrigDefMI || !CanDelete) {
1444        bool HasKill = false;
1445        if (!HasUse)
1446          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1447        else {
1448          // If this is two-address code, then this index starts a new VNInfo.
1449          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1450          if (VNI)
1451            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1452        }
1453        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1454          SpillIdxes.find(MBBId);
1455        if (!HasKill) {
1456          if (SII == SpillIdxes.end()) {
1457            std::vector<SRInfo> S;
1458            S.push_back(SRInfo(index, NewVReg, true));
1459            SpillIdxes.insert(std::make_pair(MBBId, S));
1460          } else if (SII->second.back().vreg != NewVReg) {
1461            SII->second.push_back(SRInfo(index, NewVReg, true));
1462          } else if ((int)index > SII->second.back().index) {
1463            // If there is an earlier def and this is a two-address
1464            // instruction, then it's not possible to fold the store (which
1465            // would also fold the load).
1466            SRInfo &Info = SII->second.back();
1467            Info.index = index;
1468            Info.canFold = !HasUse;
1469          }
1470          SpillMBBs.set(MBBId);
1471        } else if (SII != SpillIdxes.end() &&
1472                   SII->second.back().vreg == NewVReg &&
1473                   (int)index > SII->second.back().index) {
1474          // There is an earlier def that's not killed (must be two-address).
1475          // The spill is no longer needed.
1476          SII->second.pop_back();
1477          if (SII->second.empty()) {
1478            SpillIdxes.erase(MBBId);
1479            SpillMBBs.reset(MBBId);
1480          }
1481        }
1482      }
1483    }
1484
1485    if (HasUse) {
1486      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1487        SpillIdxes.find(MBBId);
1488      if (SII != SpillIdxes.end() &&
1489          SII->second.back().vreg == NewVReg &&
1490          (int)index > SII->second.back().index)
1491        // Use(s) follow the last def; it's not safe to fold the spill.
1492        SII->second.back().canFold = false;
1493      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1494        RestoreIdxes.find(MBBId);
1495      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1496        // If we are splitting live intervals, only fold if it's the first
1497        // use and there isn't another use later in the MBB.
1498        RII->second.back().canFold = false;
1499      else if (IsNew) {
1500        // Only need a reload if there isn't an earlier def / use.
1501        if (RII == RestoreIdxes.end()) {
1502          std::vector<SRInfo> Infos;
1503          Infos.push_back(SRInfo(index, NewVReg, true));
1504          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1505        } else {
1506          RII->second.push_back(SRInfo(index, NewVReg, true));
1507        }
1508        RestoreMBBs.set(MBBId);
1509      }
1510    }
1511
1512    // Update spill weight.
1513    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1514    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1515  }
1516
1517  if (NewVReg && TrySplit && AllCanFold) {
1518    // If all of its defs / uses can be folded, give it a low spill weight.
1519    LiveInterval &nI = getOrCreateInterval(NewVReg);
1520    nI.weight /= 10.0F;
1521  }
1522}
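// On return, SpillMBBs / SpillIdxes record the last def of each new vreg in
// each basic block, and RestoreMBBs / RestoreIdxes its first use;
// addIntervalsForSpills walks these below to insert the actual spills and
// restores.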
1523
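/// alsoFoldARestore - Return true if there is a restore of register vr at
/// the given instruction index in block Id that is still marked foldable.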
1524bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1525                        BitVector &RestoreMBBs,
1526                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1527  if (!RestoreMBBs[Id])
1528    return false;
1529  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1530  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1531    if (Restores[i].index == index &&
1532        Restores[i].vreg == vr &&
1533        Restores[i].canFold)
1534      return true;
1535  return false;
1536}
1537
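/// eraseRestoreInfo - Invalidate the restore of register vr at the given
/// instruction index in block Id by setting its index to -1; the restore
/// loop in addIntervalsForSpills skips such entries.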
1538void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1539                        BitVector &RestoreMBBs,
1540                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1541  if (!RestoreMBBs[Id])
1542    return;
1543  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1544  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1545    if (Restores[i].index == index && Restores[i].vreg == vr)
1546      Restores[i].index = -1;
1547}
1548
1549/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1550/// spilled and create empty intervals for their uses.
1551void
1552LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1553                                    const TargetRegisterClass* rc,
1554                                    std::vector<LiveInterval*> &NewLIs) {
1555  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1556         re = mri_->reg_end(); ri != re; ) {
1557    MachineOperand &O = ri.getOperand();
1558    MachineInstr *MI = &*ri;
1559    ++ri;
1560    if (O.isDef()) {
1561      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1562             "Register def was not rewritten?");
1563      RemoveMachineInstrFromMaps(MI);
1564      vrm.RemoveMachineInstrFromMaps(MI);
1565      MI->eraseFromParent();
1566    } else {
1567      // This must be a use of an implicit_def so it's not part of the live
1568      // interval. Create a new empty live interval for it.
1569      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
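      // For example (illustrative):
      //   %reg1024<def> = IMPLICIT_DEF    ; erased by the branch above
      //   STORE %reg1024, <fi#0>          ; this use is rewritten below to a
      //                                   ; fresh vreg with an empty interval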
1570      unsigned NewVReg = mri_->createVirtualRegister(rc);
1571      vrm.grow();
1572      vrm.setIsImplicitlyDefined(NewVReg);
1573      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1574      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1575        MachineOperand &MO = MI->getOperand(i);
1576        if (MO.isReg() && MO.getReg() == li.reg)
1577          MO.setReg(NewVReg);
1578      }
1579    }
1580  }
1581}
1582
1583
1584std::vector<LiveInterval*> LiveIntervals::
1585addIntervalsForSpills(const LiveInterval &li,
1586                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1587                      float &SSWeight) {
1588  assert(li.weight != HUGE_VALF &&
1589         "attempt to spill already spilled interval!");
1590
1591  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1592  li.print(DOUT, tri_);
1593  DOUT << '\n';
1594
1595  // Spill slot weight.
1596  SSWeight = 0.0f;
1597
1598  // Each bit specifies whether a spill is required in the MBB.
1599  BitVector SpillMBBs(mf_->getNumBlockIDs());
1600  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1601  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1602  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1603  std::map<unsigned,unsigned> MBBVRegsMap;
1604  std::vector<LiveInterval*> NewLIs;
1605  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1606
1607  unsigned NumValNums = li.getNumValNums();
1608  SmallVector<MachineInstr*, 4> ReMatDefs;
1609  ReMatDefs.resize(NumValNums, NULL);
1610  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1611  ReMatOrigDefs.resize(NumValNums, NULL);
1612  SmallVector<int, 4> ReMatIds;
1613  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1614  BitVector ReMatDelete(NumValNums);
1615  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1616
1617  // Spilling a split live interval. It cannot be split any further, and it
1618  // is guaranteed to be a single val# / range interval.
1619  if (vrm.getPreSplitReg(li.reg)) {
1620    vrm.setIsSplitFromReg(li.reg, 0);
1621    // Unset the split kill marker on the last use.
1622    unsigned KillIdx = vrm.getKillPoint(li.reg);
1623    if (KillIdx) {
1624      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1625      assert(KillMI && "Last use disappeared?");
1626      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1627      assert(KillOp != -1 && "Last use disappeared?");
1628      KillMI->getOperand(KillOp).setIsKill(false);
1629    }
1630    vrm.removeKillPoint(li.reg);
1631    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1632    Slot = vrm.getStackSlot(li.reg);
1633    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1634    MachineInstr *ReMatDefMI = DefIsReMat ?
1635      vrm.getReMaterializedMI(li.reg) : NULL;
1636    int LdSlot = 0;
1637    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1638    bool isLoad = isLoadSS ||
1639      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1640    bool IsFirstRange = true;
1641    for (LiveInterval::Ranges::const_iterator
1642           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1643      // If this is a split live interval with multiple ranges, it means there
1644      // are two-address instructions that re-defined the value. Only the
1645      // first def can be rematerialized!
1646      if (IsFirstRange) {
1647        // Note ReMatOrigDefMI has already been deleted.
1648        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1649                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1650                             false, vrm, rc, ReMatIds, loopInfo,
1651                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1652                             MBBVRegsMap, NewLIs, SSWeight);
1653      } else {
1654        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1655                             Slot, 0, false, false, false,
1656                             false, vrm, rc, ReMatIds, loopInfo,
1657                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1658                             MBBVRegsMap, NewLIs, SSWeight);
1659      }
1660      IsFirstRange = false;
1661    }
1662
1663    SSWeight = 0.0f;  // Already accounted for when split.
1664    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1665    return NewLIs;
1666  }
1667
1668  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1669  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1670    TrySplit = false;
1671  if (TrySplit)
1672    ++numSplits;
1673  bool NeedStackSlot = false;
1674  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1675       i != e; ++i) {
1676    const VNInfo *VNI = *i;
1677    unsigned VN = VNI->id;
1678    unsigned DefIdx = VNI->def;
1679    if (DefIdx == ~1U)
1680      continue; // Dead val#.
1681    // Is the def for the val# rematerializable?
1682    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1683      ? 0 : getInstructionFromIndex(DefIdx);
1684    bool dummy;
1685    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1686      // Remember how to remat the def of this val#.
1687      ReMatOrigDefs[VN] = ReMatDefMI;
1688      // Original def may be modified so we have to make a copy here.
1689      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1690      ClonedMIs.push_back(Clone);
1691      ReMatDefs[VN] = Clone;
1692
1693      bool CanDelete = true;
1694      if (VNI->hasPHIKill) {
1695        // A kill is a phi node; not all of its uses can be rematerialized,
1696        // so the def must not be deleted.
1697        CanDelete = false;
1698        // Need a stack slot if there is any live range where uses cannot be
1699        // rematerialized.
1700        NeedStackSlot = true;
1701      }
1702      if (CanDelete)
1703        ReMatDelete.set(VN);
1704    } else {
1705      // Need a stack slot if there is any live range where uses cannot be
1706      // rematerialized.
1707      NeedStackSlot = true;
1708    }
1709  }
1710
1711  // One stack slot per live interval.
1712  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1713    Slot = vrm.assignVirt2StackSlot(li.reg);
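  // (The pre-split case returned early above, so getPreSplitReg(li.reg) is
  // always zero here; the second check is merely defensive.)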
1714
1715  // Create new intervals and rewrite defs and uses.
1716  for (LiveInterval::Ranges::const_iterator
1717         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1718    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1719    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1720    bool DefIsReMat = ReMatDefMI != NULL;
1721    bool CanDelete = ReMatDelete[I->valno->id];
1722    int LdSlot = 0;
1723    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1724    bool isLoad = isLoadSS ||
1725      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1726    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1727                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1728                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1729                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1730                               MBBVRegsMap, NewLIs, SSWeight);
1731  }
1732
1733  // Insert spills / restores if we are splitting.
1734  if (!TrySplit) {
1735    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1736    return NewLIs;
1737  }
1738
1739  SmallPtrSet<LiveInterval*, 4> AddedKill;
1740  SmallVector<unsigned, 2> Ops;
1741  if (NeedStackSlot) {
1742    int Id = SpillMBBs.find_first();
1743    while (Id != -1) {
1744      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1745      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1746      std::vector<SRInfo> &spills = SpillIdxes[Id];
1747      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1748        int index = spills[i].index;
1749        unsigned VReg = spills[i].vreg;
1750        LiveInterval &nI = getOrCreateInterval(VReg);
1751        bool isReMat = vrm.isReMaterialized(VReg);
1752        MachineInstr *MI = getInstructionFromIndex(index);
1753        bool CanFold = false;
1754        bool FoundUse = false;
1755        Ops.clear();
1756        if (spills[i].canFold) {
1757          CanFold = true;
1758          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1759            MachineOperand &MO = MI->getOperand(j);
1760            if (!MO.isRegister() || MO.getReg() != VReg)
1761              continue;
1762
1763            Ops.push_back(j);
1764            if (MO.isDef())
1765              continue;
1766            if (isReMat ||
1767                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1768                                                RestoreMBBs, RestoreIdxes))) {
1769              // MI has two-address uses of the same register. If the use
1770              // isn't the first and only use in the BB, then we can't fold
1771              // it. FIXME: Move this to rewriteInstructionsForSpills.
1772              CanFold = false;
1773              break;
1774            }
1775            FoundUse = true;
1776          }
1777        }
1778        // Fold the store into the def if possible.
1779        bool Folded = false;
1780        if (CanFold && !Ops.empty()) {
1781          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1782            Folded = true;
1783            if (FoundUse) {
1784              // Uses were also folded; do not issue a load.
1785              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1786              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1787            }
1788            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1789          }
1790        }
1791
1792        // Otherwise tell the spiller to issue a spill.
1793        if (!Folded) {
1794          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1795          bool isKill = LR->end == getStoreIndex(index);
1796          if (!MI->registerDefIsDead(nI.reg))
1797            // No need to spill a dead def.
1798            vrm.addSpillPoint(VReg, isKill, MI);
1799          if (isKill)
1800            AddedKill.insert(&nI);
1801        }
1802
1803        // Update spill slot weight.
1804        if (!isReMat)
1805          SSWeight += getSpillWeight(true, false, loopDepth);
1806      }
1807      Id = SpillMBBs.find_next(Id);
1808    }
1809  }
1810
1811  int Id = RestoreMBBs.find_first();
1812  while (Id != -1) {
1813    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1814    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1815
1816    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1817    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1818      int index = restores[i].index;
1819      if (index == -1)
1820        continue;
1821      unsigned VReg = restores[i].vreg;
1822      LiveInterval &nI = getOrCreateInterval(VReg);
1823      bool isReMat = vrm.isReMaterialized(VReg);
1824      MachineInstr *MI = getInstructionFromIndex(index);
1825      bool CanFold = false;
1826      Ops.clear();
1827      if (restores[i].canFold) {
1828        CanFold = true;
1829        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1830          MachineOperand &MO = MI->getOperand(j);
1831          if (!MO.isRegister() || MO.getReg() != VReg)
1832            continue;
1833
1834          if (MO.isDef()) {
1835            // If this restore were to be folded, it would have been folded
1836            // already.
1837            CanFold = false;
1838            break;
1839          }
1840          Ops.push_back(j);
1841        }
1842      }
1843
1844      // Fold the load into the use if possible.
1845      bool Folded = false;
1846      if (CanFold && !Ops.empty()) {
1847        if (!isReMat)
1848          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1849        else {
1850          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1851          int LdSlot = 0;
1852          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1853          // If the rematerializable def is a load, also try to fold it.
1854          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1855            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1856                                          Ops, isLoadSS, LdSlot, VReg);
1857          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1858          if (ImpUse) {
1859            // Re-matting an instruction with virtual register use. Add the
1860            // register as an implicit use on the use MI and update the register
1861            // interval's spill weight to HUGE_VALF to prevent it from being
1862            // spilled.
1863            LiveInterval &ImpLi = getInterval(ImpUse);
1864            ImpLi.weight = HUGE_VALF;
1865            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1866          }
1867        }
1868      }
1869      // If folding is not possible / failed, then tell the spiller to issue a
1870      // load / rematerialization for us.
1871      if (Folded)
1872        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1873      else
1874        vrm.addRestorePoint(VReg, MI);
1875
1876      // Update spill slot weight.
1877      if (!isReMat)
1878        SSWeight += getSpillWeight(false, true, loopDepth);
1879    }
1880    Id = RestoreMBBs.find_next(Id);
1881  }
1882
1883  // Finalize intervals: add kills, finalize spill weights, and filter out
1884  // dead intervals.
1885  std::vector<LiveInterval*> RetNewLIs;
1886  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1887    LiveInterval *LI = NewLIs[i];
1888    if (!LI->empty()) {
1889      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1890      if (!AddedKill.count(LI)) {
1891        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1892        unsigned LastUseIdx = getBaseIndex(LR->end);
1893        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1894        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1895        assert(UseIdx != -1);
1896        if (LastUse->getOperand(UseIdx).isImplicit() ||
1897            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1898          LastUse->getOperand(UseIdx).setIsKill();
1899          vrm.addKillPoint(LI->reg, LastUseIdx);
1900        }
1901      }
1902      RetNewLIs.push_back(LI);
1903    }
1904  }
1905
1906  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1907  return RetNewLIs;
1908}
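// A minimal caller-side sketch (illustrative names; in-tree, the register
// allocator's spill code drives this):
//   float SSWeight;
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, loopInfo, *vrm, SSWeight);
//   // Each returned interval is a new vreg to be allocated in a later pass.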
1909
1910/// hasAllocatableSuperReg - Return true if the specified physical register has
1911/// any super register that's allocatable.
1912bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1913  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1914    if (allocatableRegs_[*AS] && hasInterval(*AS))
1915      return true;
1916  return false;
1917}
1918
1919/// getRepresentativeReg - Find the largest super register of the specified
1920/// physical register.
1921unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1922  // Find the largest super-register that is allocatable.
1923  unsigned BestReg = Reg;
1924  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1925    unsigned SuperReg = *AS;
1926    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1927      BestReg = SuperReg;
1928      break;
1929    }
1930  }
1931  return BestReg;
1932}
1933
1934/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1935/// specified interval that conflict with the specified physical register.
1936unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1937                                                   unsigned PhysReg) const {
1938  unsigned NumConflicts = 0;
1939  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1940  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1941         E = mri_->reg_end(); I != E; ++I) {
1942    MachineOperand &O = I.getOperand();
1943    MachineInstr *MI = O.getParent();
1944    unsigned Index = getInstructionIndex(MI);
1945    if (pli.liveAt(Index))
1946      ++NumConflicts;
1947  }
1948  return NumConflicts;
1949}
1950
1951/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1952/// around all defs and uses of the specified interval.
1953void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1954                                            unsigned PhysReg, VirtRegMap &vrm) {
1955  unsigned SpillReg = getRepresentativeReg(PhysReg);
1956
1957  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1958    // There may be registers which alias PhysReg but which are not
1959    // sub-registers of the chosen representative super register; we
1960    // assert here because we can't handle that case yet.
1961    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1962           tri_->isSuperRegister(*AS, SpillReg));
1963
1964  LiveInterval &pli = getInterval(SpillReg);
1965  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1966  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1967         E = mri_->reg_end(); I != E; ++I) {
1968    MachineOperand &O = I.getOperand();
1969    MachineInstr *MI = O.getParent();
1970    if (SeenMIs.count(MI))
1971      continue;
1972    SeenMIs.insert(MI);
1973    unsigned Index = getInstructionIndex(MI);
1974    if (pli.liveAt(Index)) {
1975      vrm.addEmergencySpill(SpillReg, MI);
1976      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1977      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1978        if (!hasInterval(*AS))
1979          continue;
1980        LiveInterval &spli = getInterval(*AS);
1981        if (spli.liveAt(Index))
1982          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1983      }
1984    }
1985  }
1986}
1987
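/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value number and extend the register's live interval
/// from the def index through the end of the defining block.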
1988LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1989                                                   MachineInstr* startInst) {
1990  LiveInterval& Interval = getOrCreateInterval(reg);
1991  VNInfo* VN = Interval.getNextValue(
1992            getInstructionIndex(startInst) + InstrSlots::DEF,
1993            startInst, getVNInfoAllocator());
1994  VN->hasPHIKill = true;
1995  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1996  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1997               getMBBEndIdx(startInst->getParent()) + 1, VN);
1998  Interval.addRange(LR);
1999
2000  return LR;
2001}
2002