LiveIntervalAnalysis.cpp revision d45a11ac9bcf958d5296bf7571934c20a1008291
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
59
60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61  AU.addRequired<AliasAnalysis>();
62  AU.addPreserved<AliasAnalysis>();
63  AU.addPreserved<LiveVariables>();
64  AU.addRequired<LiveVariables>();
65  AU.addPreservedID(MachineLoopInfoID);
66  AU.addPreservedID(MachineDominatorsID);
67  AU.addRequiredID(TwoAddressInstructionPassID);
68  MachineFunctionPass::getAnalysisUsage(AU);
69}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

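  // Each instruction is assigned InstrSlots::NUM consecutive index values,
  // one per sub-slot, and extra empty slots are interleaved to leave room
  // for code inserted later (e.g. spills and reloads). A sketch of the
  // numbering for a block with a single instruction, assuming NUM == 4
  // with sub-slots load=0, use=1, def=2, store=3:
  //   [ 0, 3]  empty slot at the start of the block
  //   [ 4, 7]  the instruction (4 = load, 5 = use, 6 = def, 7 = store)
  //   [ 8,11]  empty slot after the instruction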
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second.begin(),
           LE = OI->second.end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
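        // E.g. (a sketch, assuming NUM == 4 and a DEF sub-slot of 2): an old
        // start index of 6*4+2 maps to mi2iMap_[OldI2MI[6]] + 2, the same
        // DEF sub-slot of the same instruction under the new numbering.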
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second.vni_begin(),
           VNE = OI->second.vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified physical register
/// is used or defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
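      // Walk the range one instruction at a time; getBaseIndex strips the
      // sub-slot offset, so index always lands on an instruction boundary.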
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-addr elimination.  If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
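      // A sketch of the situation (hypothetical vregs and indexes):
      //   4  %reg1026 = MOV %reg1025     <- the copy; value #0's old def
      //   8  %reg1026 = ADD %reg1026, x  <- the two-address redef
      // Below, [DefIndex, RedefIndex) is re-added under a new value number
      // defined by the copy, and value #0 is moved to the redef.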
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
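  // E.g. (a sketch, assuming NUM == 4 with sub-slots load=0, use=1, def=2,
  // store=3): a def at base index 8 that is killed at base index 16 yields
  // the interval [10, 18).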
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case where we should get here without finding a kill or a
  // point where the register is known dead is a physreg that is live-in
  // to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills. If the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
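  // The value is live-in, so the range starts at the block's first index
  // rather than at a def slot.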
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual and physical
/// registers. For some ordering of the machine instructions [1,N], a live
/// interval is an interval [i, j) where 1 <= i <= j < N and the register is
/// live throughout.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

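  // Idx2MBBMap is sorted by block start index, so the walk below collects
  // every block whose start index lies in [LR.start, LR.end).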
  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one virtual register
/// operand (for now, we only allow one), then its uses implicitly use that
/// register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // In release builds stop at the first register operand; in debug builds
    // keep scanning so the assert above can catch a second one.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the current
          // remat mechanisms support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

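  // A TIED_TO constraint marks a use that must share its register with a
  // def (a two-address operand); such uses are recorded as isModRef and
  // left out of the fold list below.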
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill or a restore from
/// / to a stack slot, or a rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload or spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
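    // II is the first block whose start index is at or after I->start; if
    // the range reaches past that start index, it is treated as crossing a
    // block boundary.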
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // Crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval in to-be rematerialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because an
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
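  // Sorting by instruction index makes all entries for the same MI adjacent,
  // so the loop below can coalesce an instruction's multiple operand entries
  // in a single pass.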
1366
1367  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1368  // Now rewrite the defs and uses.
1369  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1370    RewriteInfo &rwi = RewriteMIs[i];
1371    ++i;
1372    unsigned index = rwi.Index;
1373    bool MIHasUse = rwi.HasUse;
1374    bool MIHasDef = rwi.HasDef;
1375    MachineInstr *MI = rwi.MI;
1376    // If MI def and/or use the same register multiple times, then there
1377    // are multiple entries.
1378    unsigned NumUses = MIHasUse;
1379    while (i != e && RewriteMIs[i].MI == MI) {
1380      assert(RewriteMIs[i].Index == index);
1381      bool isUse = RewriteMIs[i].HasUse;
1382      if (isUse) ++NumUses;
1383      MIHasUse |= isUse;
1384      MIHasDef |= RewriteMIs[i].HasDef;
1385      ++i;
1386    }
1387    MachineBasicBlock *MBB = MI->getParent();
1388
1389    if (ImpUse && MI != ReMatDefMI) {
1390      // Re-matting an instruction with a virtual register use. Update the
1391      // register interval's spill weight to HUGE_VALF to prevent it from
1392      // being spilled.
1393      LiveInterval &ImpLi = getInterval(ImpUse);
1394      ImpLi.weight = HUGE_VALF;
1395    }
1396
1397    unsigned MBBId = MBB->getNumber();
1398    unsigned ThisVReg = 0;
1399    if (TrySplit) {
1400      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1401      if (NVI != MBBVRegsMap.end()) {
1402        ThisVReg = NVI->second;
1403        // One common case:
1404        // x = use
1405        // ...
1406        // ...
1407        // def = ...
1408        //     = use
1409        // It's better to start a new interval to avoid artificially
1410        // extending the new interval.
1411        if (MIHasDef && !MIHasUse) {
1412          MBBVRegsMap.erase(MBB->getNumber());
1413          ThisVReg = 0;
1414        }
1415      }
1416    }
1417
1418    bool IsNew = ThisVReg == 0;
1419    if (IsNew) {
1420      // This ends the previous live interval. If all of its defs / uses
1421      // can be folded, give it a low spill weight.
1422      if (NewVReg && TrySplit && AllCanFold) {
1423        LiveInterval &nI = getOrCreateInterval(NewVReg);
1424        nI.weight /= 10.0F;
1425      }
1426      AllCanFold = true;
1427    }
1428    NewVReg = ThisVReg;
1429
1430    bool HasDef = false;
1431    bool HasUse = false;
1432    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1433                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1434                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1435                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1436                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1437    if (!HasDef && !HasUse)
1438      continue;
1439
1440    AllCanFold &= CanFold;
1441
1442    // Update weight of spill interval.
1443    LiveInterval &nI = getOrCreateInterval(NewVReg);
1444    if (!TrySplit) {
1445      // The spill weight is now infinity as it cannot be spilled again.
1446      nI.weight = HUGE_VALF;
1447      continue;
1448    }
1449
1450    // Keep track of the last def and first use in each MBB.
1451    if (HasDef) {
1452      if (MI != ReMatOrigDefMI || !CanDelete) {
1453        bool HasKill = false;
1454        if (!HasUse)
1455          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1456        else {
1457          // If this is two-address code, then this index starts a new VNInfo.
1458          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1459          if (VNI)
1460            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1461        }
1462        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1463          SpillIdxes.find(MBBId);
1464        if (!HasKill) {
1465          if (SII == SpillIdxes.end()) {
1466            std::vector<SRInfo> S;
1467            S.push_back(SRInfo(index, NewVReg, true));
1468            SpillIdxes.insert(std::make_pair(MBBId, S));
1469          } else if (SII->second.back().vreg != NewVReg) {
1470            SII->second.push_back(SRInfo(index, NewVReg, true));
1471          } else if ((int)index > SII->second.back().index) {
1472            // If there is an earlier def and this is a two-address
1473            // instruction, then it's not possible to fold the store (which
1474            // would also fold the load).
1475            SRInfo &Info = SII->second.back();
1476            Info.index = index;
1477            Info.canFold = !HasUse;
1478          }
1479          SpillMBBs.set(MBBId);
1480        } else if (SII != SpillIdxes.end() &&
1481                   SII->second.back().vreg == NewVReg &&
1482                   (int)index > SII->second.back().index) {
1483          // There is an earlier def that's not killed (must be two-address).
1484          // The spill is no longer needed.
1485          SII->second.pop_back();
1486          if (SII->second.empty()) {
1487            SpillIdxes.erase(MBBId);
1488            SpillMBBs.reset(MBBId);
1489          }
1490        }
1491      }
1492    }
1493
1494    if (HasUse) {
1495      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1496        SpillIdxes.find(MBBId);
1497      if (SII != SpillIdxes.end() &&
1498          SII->second.back().vreg == NewVReg &&
1499          (int)index > SII->second.back().index)
1500        // There are use(s) following the last def; it's not safe to fold the spill.
1501        SII->second.back().canFold = false;
1502      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1503        RestoreIdxes.find(MBBId);
1504      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1505        // If we are splitting live intervals, only fold if it's the first
1506        // use and there isn't another use later in the MBB.
1507        RII->second.back().canFold = false;
1508      else if (IsNew) {
1509        // Only need a reload if there isn't an earlier def / use.
1510        if (RII == RestoreIdxes.end()) {
1511          std::vector<SRInfo> Infos;
1512          Infos.push_back(SRInfo(index, NewVReg, true));
1513          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1514        } else {
1515          RII->second.push_back(SRInfo(index, NewVReg, true));
1516        }
1517        RestoreMBBs.set(MBBId);
1518      }
1519    }
1520
1521    // Update spill weight.
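    // With the usual weight function this adds roughly
    // (HasDef + HasUse) * 10^loopDepth, so defs / uses in deeply nested loops
    // dominate the final weight.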
1522    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1523    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1524  }
1525
1526  if (NewVReg && TrySplit && AllCanFold) {
1527    // If all of its defs / uses can be folded, give it a low spill weight.
1528    LiveInterval &nI = getOrCreateInterval(NewVReg);
1529    nI.weight /= 10.0F;
1530  }
1531}
1532
1533bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1534                        BitVector &RestoreMBBs,
1535                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1536  if (!RestoreMBBs[Id])
1537    return false;
1538  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1539  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1540    if (Restores[i].index == index &&
1541        Restores[i].vreg == vr &&
1542        Restores[i].canFold)
1543      return true;
1544  return false;
1545}
1546
1547void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1548                        BitVector &RestoreMBBs,
1549                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1550  if (!RestoreMBBs[Id])
1551    return;
1552  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1553  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1554    if (Restores[i].index == index && Restores[i].vreg == vr)
1555      Restores[i].index = -1;
1556}
1557
1558/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1559/// spilled and create empty intervals for their uses.
1560void
1561LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1562                                    const TargetRegisterClass* rc,
1563                                    std::vector<LiveInterval*> &NewLIs) {
1564  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1565         re = mri_->reg_end(); ri != re; ) {
1566    MachineOperand &O = ri.getOperand();
1567    MachineInstr *MI = &*ri;
1568    ++ri;
1569    if (O.isDef()) {
1570      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1571             "Register def was not rewritten?");
1572      RemoveMachineInstrFromMaps(MI);
1573      vrm.RemoveMachineInstrFromMaps(MI);
1574      MI->eraseFromParent();
1575    } else {
1576      // This must be a use of an implicit_def, so it's not part of the live
1577      // interval. Create a new empty live interval for it.
1578      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
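      // Illustration:
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  STORE %reg1024, <fi#0>
      // The STORE reads undefined bits, so any fresh register satisfies it;
      // the empty interval tells the allocator that no reload is needed.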
1579      unsigned NewVReg = mri_->createVirtualRegister(rc);
1580      vrm.grow();
1581      vrm.setIsImplicitlyDefined(NewVReg);
1582      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1583      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1584        MachineOperand &MO = MI->getOperand(i);
1585        if (MO.isReg() && MO.getReg() == li.reg)
1586          MO.setReg(NewVReg);
1587      }
1588    }
1589  }
1590}
1591
1592
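// An illustrative caller sketch (the surrounding names are hypothetical):
// a register allocator spills a doomed interval and requeues the pieces:
//   float SSWeight;
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, loopInfo, *vrm, SSWeight);
//   for (unsigned i = 0, e = Added.size(); i != e; ++i)
//     unhandledQueue.push(Added[i]);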
1593std::vector<LiveInterval*> LiveIntervals::
1594addIntervalsForSpills(const LiveInterval &li,
1595                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1596                      float &SSWeight) {
1597  assert(li.weight != HUGE_VALF &&
1598         "attempt to spill already spilled interval!");
1599
1600  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1601  li.print(DOUT, tri_);
1602  DOUT << '\n';
1603
1604  // Spill slot weight.
1605  SSWeight = 0.0f;
1606
1607  // Each bit specifies whether a spill is required in the MBB.
1608  BitVector SpillMBBs(mf_->getNumBlockIDs());
1609  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1610  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1611  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1612  std::map<unsigned,unsigned> MBBVRegsMap;
1613  std::vector<LiveInterval*> NewLIs;
1614  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1615
1616  unsigned NumValNums = li.getNumValNums();
1617  SmallVector<MachineInstr*, 4> ReMatDefs;
1618  ReMatDefs.resize(NumValNums, NULL);
1619  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1620  ReMatOrigDefs.resize(NumValNums, NULL);
1621  SmallVector<int, 4> ReMatIds;
1622  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1623  BitVector ReMatDelete(NumValNums);
1624  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1625
1626  // Spilling a split live interval. It cannot be split any further, and it
1627  // is guaranteed to be a single val# / range interval.
1628  if (vrm.getPreSplitReg(li.reg)) {
1629    vrm.setIsSplitFromReg(li.reg, 0);
1630    // Unset the split kill marker on the last use.
1631    unsigned KillIdx = vrm.getKillPoint(li.reg);
1632    if (KillIdx) {
1633      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1634      assert(KillMI && "Last use disappeared?");
1635      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1636      assert(KillOp != -1 && "Last use disappeared?");
1637      KillMI->getOperand(KillOp).setIsKill(false);
1638    }
1639    vrm.removeKillPoint(li.reg);
1640    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1641    Slot = vrm.getStackSlot(li.reg);
1642    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1643    MachineInstr *ReMatDefMI = DefIsReMat ?
1644      vrm.getReMaterializedMI(li.reg) : NULL;
1645    int LdSlot = 0;
1646    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1647    bool isLoad = isLoadSS ||
1648      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1649    bool IsFirstRange = true;
1650    for (LiveInterval::Ranges::const_iterator
1651           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1652      // If this is a split live interval with multiple ranges, it means there
1653      // are two-address instructions that re-defined the value. Only the
1654      // first def can be rematerialized!
1655      if (IsFirstRange) {
1656        // Note ReMatOrigDefMI has already been deleted.
1657        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1658                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1659                             false, vrm, rc, ReMatIds, loopInfo,
1660                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1661                             MBBVRegsMap, NewLIs, SSWeight);
1662      } else {
1663        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1664                             Slot, 0, false, false, false,
1665                             false, vrm, rc, ReMatIds, loopInfo,
1666                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1667                             MBBVRegsMap, NewLIs, SSWeight);
1668      }
1669      IsFirstRange = false;
1670    }
1671
1672    SSWeight = 0.0f;  // Already accounted for when split.
1673    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1674    return NewLIs;
1675  }
1676
1677  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1678  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1679    TrySplit = false;
1680  if (TrySplit)
1681    ++numSplits;
1682  bool NeedStackSlot = false;
1683  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1684       i != e; ++i) {
1685    const VNInfo *VNI = *i;
1686    unsigned VN = VNI->id;
1687    unsigned DefIdx = VNI->def;
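    // VNInfo::def uses sentinel encodings here: ~1U marks a dead val#, and
    // ~0U a val# with no known defining instruction (e.g. a PHI join), which
    // can never be rematerialized.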
1688    if (DefIdx == ~1U)
1689      continue; // Dead val#.
1690    // Is the def for the val# rematerializable?
1691    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1692      ? 0 : getInstructionFromIndex(DefIdx);
1693    bool dummy;
1694    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1695      // Remember how to remat the def of this val#.
1696      ReMatOrigDefs[VN] = ReMatDefMI;
1697      // The original def may be modified, so we have to make a copy here.
1698      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1699      ClonedMIs.push_back(Clone);
1700      ReMatDefs[VN] = Clone;
1701
1702      bool CanDelete = true;
1703      if (VNI->hasPHIKill) {
1704        // A kill is a phi node, so not all of its uses can be
1705        // rematerialized. The def must not be deleted.
1706        CanDelete = false;
1707        // Need a stack slot if there is any live range where uses cannot be
1708        // rematerialized.
1709        NeedStackSlot = true;
1710      }
1711      if (CanDelete)
1712        ReMatDelete.set(VN);
1713    } else {
1714      // Need a stack slot if there is any live range where uses cannot be
1715      // rematerialized.
1716      NeedStackSlot = true;
1717    }
1718  }
1719
1720  // One stack slot per live interval.
1721  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1722    Slot = vrm.assignVirt2StackSlot(li.reg);
1723
1724  // Create new intervals and rewrite defs and uses.
1725  for (LiveInterval::Ranges::const_iterator
1726         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1727    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1728    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1729    bool DefIsReMat = ReMatDefMI != NULL;
1730    bool CanDelete = ReMatDelete[I->valno->id];
1731    int LdSlot = 0;
1732    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1733    bool isLoad = isLoadSS ||
1734      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1735    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1736                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1737                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1738                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1739                               MBBVRegsMap, NewLIs, SSWeight);
1740  }
1741
1742  // Insert spills / restores if we are splitting; if not, we are done.
1743  if (!TrySplit) {
1744    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1745    return NewLIs;
1746  }
1747
1748  SmallPtrSet<LiveInterval*, 4> AddedKill;
1749  SmallVector<unsigned, 2> Ops;
1750  if (NeedStackSlot) {
1751    int Id = SpillMBBs.find_first();
1752    while (Id != -1) {
1753      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1754      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1755      std::vector<SRInfo> &spills = SpillIdxes[Id];
1756      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1757        int index = spills[i].index;
1758        unsigned VReg = spills[i].vreg;
1759        LiveInterval &nI = getOrCreateInterval(VReg);
1760        bool isReMat = vrm.isReMaterialized(VReg);
1761        MachineInstr *MI = getInstructionFromIndex(index);
1762        bool CanFold = false;
1763        bool FoundUse = false;
1764        Ops.clear();
1765        if (spills[i].canFold) {
1766          CanFold = true;
1767          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1768            MachineOperand &MO = MI->getOperand(j);
1769            if (!MO.isRegister() || MO.getReg() != VReg)
1770              continue;
1771
1772            Ops.push_back(j);
1773            if (MO.isDef())
1774              continue;
1775            if (isReMat ||
1776                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1777                                                RestoreMBBs, RestoreIdxes))) {
1778              // MI has two-address uses of the same register. If the use
1779              // isn't the first and only use in the BB, then we can't fold
1780              // it. FIXME: Move this to rewriteInstructionsForSpills.
1781              CanFold = false;
1782              break;
1783            }
1784            FoundUse = true;
1785          }
1786        }
1787        // Fold the store into the def if possible.
1788        bool Folded = false;
1789        if (CanFold && !Ops.empty()) {
1790          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1791            Folded = true;
1792            if (FoundUse) {
1793              // Also folded uses, do not issue a load.
1794              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1795              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1796            }
1797            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1798          }
1799        }
1800
1801        // Otherwise tell the spiller to issue a spill.
1802        if (!Folded) {
1803          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1804          bool isKill = LR->end == getStoreIndex(index);
1805          if (!MI->registerDefIsDead(nI.reg))
1806            // No need to spill a dead def.
1807            vrm.addSpillPoint(VReg, isKill, MI);
1808          if (isKill)
1809            AddedKill.insert(&nI);
1810        }
1811
1812        // Update spill slot weight.
1813        if (!isReMat)
1814          SSWeight += getSpillWeight(true, false, loopDepth);
1815      }
1816      Id = SpillMBBs.find_next(Id);
1817    }
1818  }
1819
1820  int Id = RestoreMBBs.find_first();
1821  while (Id != -1) {
1822    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1823    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1824
1825    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1826    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1827      int index = restores[i].index;
1828      if (index == -1)
1829        continue;
1830      unsigned VReg = restores[i].vreg;
1831      LiveInterval &nI = getOrCreateInterval(VReg);
1832      bool isReMat = vrm.isReMaterialized(VReg);
1833      MachineInstr *MI = getInstructionFromIndex(index);
1834      bool CanFold = false;
1835      Ops.clear();
1836      if (restores[i].canFold) {
1837        CanFold = true;
1838        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1839          MachineOperand &MO = MI->getOperand(j);
1840          if (!MO.isRegister() || MO.getReg() != VReg)
1841            continue;
1842
1843          if (MO.isDef()) {
1844            // If this restore were to be folded, it would have been folded
1845            // already.
1846            CanFold = false;
1847            break;
1848          }
1849          Ops.push_back(j);
1850        }
1851      }
1852
1853      // Fold the load into the use if possible.
1854      bool Folded = false;
1855      if (CanFold && !Ops.empty()) {
1856        if (!isReMat)
1857          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1858        else {
1859          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1860          int LdSlot = 0;
1861          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1862          // If the rematerializable def is a load, also try to fold it.
1863          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1864            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1865                                          Ops, isLoadSS, LdSlot, VReg);
1866          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1867          if (ImpUse) {
1868            // Re-matting an instruction with a virtual register use. Add the
1869            // register as an implicit use on the use MI and update the register
1870            // interval's spill weight to HUGE_VALF to prevent it from being
1871            // spilled.
1872            LiveInterval &ImpLi = getInterval(ImpUse);
1873            ImpLi.weight = HUGE_VALF;
1874            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1875          }
1876        }
1877      }
1878      // If folding is not possible / failed, then tell the spiller to issue a
1879      // load / rematerialization for us.
1880      if (Folded)
1881        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1882      else
1883        vrm.addRestorePoint(VReg, MI);
1884
1885      // Update spill slot weight.
1886      if (!isReMat)
1887        SSWeight += getSpillWeight(false, true, loopDepth);
1888    }
1889    Id = RestoreMBBs.find_next(Id);
1890  }
1891
1892  // Finalize intervals: add kills, finalize spill weights, and filter out
1893  // dead intervals.
1894  std::vector<LiveInterval*> RetNewLIs;
1895  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1896    LiveInterval *LI = NewLIs[i];
1897    if (!LI->empty()) {
1898      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1899      if (!AddedKill.count(LI)) {
1900        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1901        unsigned LastUseIdx = getBaseIndex(LR->end);
1902        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1903        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1904        assert(UseIdx != -1);
1905        if (LastUse->getOperand(UseIdx).isImplicit() ||
1906            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1907          LastUse->getOperand(UseIdx).setIsKill();
1908          vrm.addKillPoint(LI->reg, LastUseIdx);
1909        }
1910      }
1911      RetNewLIs.push_back(LI);
1912    }
1913  }
1914
1915  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1916  return RetNewLIs;
1917}
1918
1919/// hasAllocatableSuperReg - Return true if the specified physical register has
1920/// any super register that's allocatable.
1921bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1922  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1923    if (allocatableRegs_[*AS] && hasInterval(*AS))
1924      return true;
1925  return false;
1926}
1927
1928/// getRepresentativeReg - Find the largest super register of the specified
1929/// physical register.
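/// For example, on X86 a query for AX would typically be answered with EAX,
/// assuming EAX itself has no allocatable, tracked super-register.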
1930unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1931  // Find the largest super-register that is allocatable.
1932  unsigned BestReg = Reg;
1933  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1934    unsigned SuperReg = *AS;
1935    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1936      BestReg = SuperReg;
1937      break;
1938    }
1939  }
1940  return BestReg;
1941}
1942
1943/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1944/// specified interval that conflict with the specified physical register.
1945unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1946                                                   unsigned PhysReg) const {
1947  unsigned NumConflicts = 0;
1948  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1949  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1950         E = mri_->reg_end(); I != E; ++I) {
1951    MachineOperand &O = I.getOperand();
1952    MachineInstr *MI = O.getParent();
1953    unsigned Index = getInstructionIndex(MI);
1954    if (pli.liveAt(Index))
1955      ++NumConflicts;
1956  }
1957  return NumConflicts;
1958}
1959
1960/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1961/// around all defs and uses of the specified interval.
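/// This makes room for an allocation that must use PhysReg while it is live:
/// each conflicting instruction becomes an emergency spill point, and the
/// overlapping portions of the physical register intervals are removed.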
1962void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1963                                            unsigned PhysReg, VirtRegMap &vrm) {
1964  unsigned SpillReg = getRepresentativeReg(PhysReg);
1965
1966  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1967    // If there are registers which alias PhysReg but which are not
1968    // sub-registers of the chosen representative super register, assert,
1969    // since we can't handle that case yet.
1970    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1971           tri_->isSuperRegister(*AS, SpillReg));
1972
1973  LiveInterval &pli = getInterval(SpillReg);
1974  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1975  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1976         E = mri_->reg_end(); I != E; ++I) {
1977    MachineOperand &O = I.getOperand();
1978    MachineInstr *MI = O.getParent();
1979    if (SeenMIs.count(MI))
1980      continue;
1981    SeenMIs.insert(MI);
1982    unsigned Index = getInstructionIndex(MI);
1983    if (pli.liveAt(Index)) {
1984      vrm.addEmergencySpill(SpillReg, MI);
1985      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1986      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1987        if (!hasInterval(*AS))
1988          continue;
1989        LiveInterval &spli = getInterval(*AS);
1990        if (spli.liveAt(Index))
1991          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1992      }
1993    }
1994  }
1995}
1996
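/// addLiveRangeToEndOfBlock - Create a new value number for reg, defined by
/// startInst, and extend its live range to the end of startInst's basic
/// block, marking the value as reaching a PHI join (hasPHIKill).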
1997LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1998                                                   MachineInstr* startInst) {
1999  LiveInterval& Interval = getOrCreateInterval(reg);
2000  VNInfo* VN = Interval.getNextValue(
2001            getInstructionIndex(startInst) + InstrSlots::DEF,
2002            startInst, getVNInfoAllocator());
2003  VN->hasPHIKill = true;
2004  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2005  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2006               getMBBEndIdx(startInst->getParent()) + 1, VN);
2007  Interval.addRange(LR);
2008
2009  return LR;
2010}
2011