LiveIntervalAnalysis.cpp revision 3fe23ede7821a156ed0914b0ba11c3e4e644c73d
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

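  // Illustration of the numbering scheme, assuming the InstrSlots layout
  // declared in the header (LOAD=0, USE=1, DEF=2, STORE=3, NUM=4): a block
  // with two instructions is numbered
  //
  //   [empty: 0-3] [MI0: 4-7] [empty: 8-11] [MI1: 12-15] [empty: 16-19]
  //
  // so MI0's use slot is 5 and its def slot is 6. The empty slots leave
  // room for instructions (e.g. spill code) inserted later.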
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI)
      for (LiveInterval::iterator LI = OI->second.begin(),
           LE = OI->second.end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
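        // For example, with InstrSlots::NUM == 4, an old start index of 6
        // splits into i2miMap_ entry 6/4 == 1 and offset 6%4 == 2 (the DEF
        // slot under the layout illustrated above).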
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::USE) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->end = getMBBEndIdx(J->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }

        // Remap the VNInfo def index, which works the same as the
        // start indices above.
        VNInfo* vni = LI->valno;
        index = vni->def / InstrSlots::NUM;
        offset = vni->def % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          vni->def = getMBBStartIdx(J->second);

        } else {
          vni->def = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          index = (vni->kills[i]-1) / InstrSlots::NUM;
          offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::USE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                      (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->kills[i] = getMBBEndIdx(J->second) + 1;
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
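      // For example (under the 4-slot numbering sketched in
      // computeNumbering), a def at base index 4 (def slot 6) killed by a
      // use at base index 12 (use slot 13, so killIdx 14) yields the
      // intra-block range [6, 14).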
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
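      // For example (with illustrative vregs):
      //   %reg1024 = MOV %reg1026            ; input copy, becomes value #1
      //   %reg1024 = ADD %reg1024, %reg1025  ; two-addr redef, keeps value #0
      // One range was recorded for both defs; it must be split so each def
      // gets its own value number.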
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

508
509void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
510                                              MachineBasicBlock::iterator mi,
511                                              unsigned MIIdx,
512                                              MachineOperand& MO,
513                                              LiveInterval &interval,
514                                              MachineInstr *CopyMI) {
515  // A physical register cannot be live across basic block, so its
516  // lifetime must end somewhere in its defining basic block.
517  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
518
519  unsigned baseIndex = MIIdx;
520  unsigned start = getDefIndex(baseIndex);
521  unsigned end = start;
522
523  // If it is not used after definition, it is considered dead at
524  // the instruction defining it. Hence its interval is:
525  // [defSlot(def), defSlot(def)+1)
526  if (MO.isDead()) {
527    DOUT << " dead";
528    end = getDefIndex(start) + 1;
529    goto exit;
530  }
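  // For example, a dead def at base index 8 gets the unit interval [10, 11):
  // getDefIndex rounds down to the base index and adds the DEF slot (2 under
  // the assumed slot layout), and the range is closed one slot later.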

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case in which we should reach this point with a dead physreg
  // and no killing instruction is when the register is live-in to the
  // function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it is killed,
  // it shouldn't be considered live-in.
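  // A typical case is a physical argument register that is live into the
  // entry block and killed by its first use there.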
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N in
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

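  // Operand layouts relied on below: EXTRACT_SUBREG <def>, <src>, <subidx>
  // carries its source in operand 1, while INSERT_SUBREG <def>, <src>,
  // <subreg-value>, <subidx> carries the inserted value in operand 2.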
  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
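/// For example, rematerializing something like
///   %reg1025 = LEA32r %reg1024, 1, %noreg, 4
/// (hypothetical operands) requires %reg1024 to be live at every point where
/// %reg1025 is used, which is what the returned register lets callers check.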
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // Release builds can stop at the first candidate; debug builds keep
    // scanning so the assert above can catch a second register operand.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanism supports currently.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If successful, MI is updated with the newly created MI
/// and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
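    // (getSpillWeight scales with loop depth; assuming the usual
    // (isDef + isUse) * 10^loopDepth formula from the header, a use at
    // loop depth 2 contributes a weight of 100.0.)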

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }
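    // Under the 4-slot scheme the reload range spans [LOAD, USE+1) and the
    // spill range spans [DEF, STORE): e.g. for base index 20 these are
    // [20, 22) and [22, 23) respectively.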

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
1383    MachineBasicBlock *MBB = MI->getParent();
1384
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

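    // When splitting, each basic block is given its own new virtual
    // register; look up the one assigned to this MBB by an earlier rewrite,
    // if any.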
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval at the def to avoid
        // artificially extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // There are uses after the last def, so it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

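/// alsoFoldARestore - Return true if the restore of the given virtual
/// register at the given instruction index in block Id was recorded as
/// foldable.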
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

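/// eraseRestoreInfo - Invalidate the recorded restore of the given virtual
/// register at the given instruction index in block Id so that no reload is
/// issued for it.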
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}


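/// addIntervalsForSpills - Create new intervals for spilled defs / uses of
/// the given interval and rewrite the instructions to use them. Returns the
/// new intervals; the accumulated spill slot weight is returned in SSWeight.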
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the corresponding MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

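  // Rematerialization bookkeeping, indexed by val# id: the original def
  // instruction, a clone of it to remat from (the original may be modified),
  // and whether the original def can be deleted.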
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f;  // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
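  // Examine each val# and remember how to rematerialize its def, if
  // possible; any val# whose uses cannot all be rematerialized will need a
  // stack slot.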
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, so not all of its uses can be rematerialized;
        // the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores only if we are splitting; otherwise we are done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
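  // Walk the blocks that were marked as needing a spill and either fold the
  // store into the last def or ask the spiller to insert one.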
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

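  // Now walk the blocks that need a restore and either fold the reload (or
  // the rematerialized def) into the use or ask the spiller to insert one.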
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);

    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}

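/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value for the register and extend its live range from
/// the def to the end of the defining instruction's basic block, returning
/// the new range.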
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                   MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}
