LiveIntervalAnalysis.cpp revision 46292be362c1b0bb0aecf737cd4dbb81fdd38586
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

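// A sketch of the numbering scheme computed below (assuming the InstrSlots
// layout declared in LiveIntervalAnalysis.h: LOAD=0, USE=1, DEF=2, STORE=3,
// NUM=4). Every basic block starts with an empty slot and every instruction
// is followed by one, so a block with two instructions is numbered:
//
//   [ 0, 4)  empty slot at block start
//   [ 4, 8)  first instruction  (4 = load, 5 = use, 6 = def, 7 = store)
//   [ 8,12)  empty slot
//   [12,16)  second instruction
//   [16,20)  empty slot
//
// The empty slots leave room for instructions inserted later, e.g. spill
// and reload code.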
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
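        // (An offset of InstrSlots::LOAD marks a block boundary here: ranges
        // that begin at a block start land on the LOAD slot of the block's
        // leading empty slot rather than on any instruction.)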
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
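        // (def == ~0U marks a value with no defining instruction, e.g. a
        // live-in value; def == ~1U marks a dead val#, as isReMaterializable
        // notes below.)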
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
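      // For example (slot indices are hypothetical):
      //   4  %reg1024 = MOV %reg1025      ; input copy, currently value #0
      //   8  %reg1024 = ADD %reg1024, 1   ; two-address redef of %reg1024
      // After the split, the copy's range [4,8) belongs to a new value #1,
      // and value #0 is re-pointed at the redefinition handled below.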
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value #0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live range, which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = start + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case where we should get here without a killing instruction,
  // or an instruction where we know the register is dead, is when it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills. If this register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
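/// For example (a rough sketch in this file's slot numbering), a virtual
/// register defined by instruction i and last read by instruction j gets
/// the interval [defSlot(i), useSlot(j)+1).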
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isRegister() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
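  // Idx2MBBMap is sorted by block start index; this lower_bound (which relies
  // on the IdxMBBPair comparison overloads from the header) finds the first
  // block starting at or after LR.start, and the loop below collects every
  // block whose start index lies inside [LR.start, LR.end).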
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
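  // Physical register intervals get an effectively infinite spill weight so
  // the allocator never tries to spill them; virtual register intervals start
  // at zero and accumulate weight from their uses.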
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
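    // In release builds we can stop at the first register operand; in debug
    // builds keep scanning so the assert above can flag a second one.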
#ifdef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegister()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
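    // A remat'ed copy of MI would read ImpUse wherever it is inserted, so the
    // value of ImpUse that MI reads must still be available at every use of
    // li.reg covered by this val#.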
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
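      // (A TIED_TO constraint marks a use that must share its register with a
      // def - a two-address operand - so the operand is both read and written
      // and is recorded as isModRef.)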
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
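  // Idx2MBBMap is sorted by block start index, so for each range lower_bound
  // yields the first block starting at or after the range; if the range
  // extends past that block's start index, it crosses a block boundary.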
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // Crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
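    // (getSpillWeight presumably scales the def/use count by loop depth,
    // roughly (HasDef + HasUse) * 10^loopDepth at this revision, so spills
    // inside deep loops are penalized more heavily.)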
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because an earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with a virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
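  // Round the range down/up to whole instruction slots so the rewrite below
  // considers every instruction the live range touches.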

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG into target registers that overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1393
1394  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1395  // Now rewrite the defs and uses.
1396  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1397    RewriteInfo &rwi = RewriteMIs[i];
1398    ++i;
1399    unsigned index = rwi.Index;
1400    bool MIHasUse = rwi.HasUse;
1401    bool MIHasDef = rwi.HasDef;
1402    MachineInstr *MI = rwi.MI;
1403    // If MI defs and/or uses the same register multiple times, then there
1404    // are multiple entries.
1405    unsigned NumUses = MIHasUse;
1406    while (i != e && RewriteMIs[i].MI == MI) {
1407      assert(RewriteMIs[i].Index == index);
1408      bool isUse = RewriteMIs[i].HasUse;
1409      if (isUse) ++NumUses;
1410      MIHasUse |= isUse;
1411      MIHasDef |= RewriteMIs[i].HasDef;
1412      ++i;
1413    }
1414    MachineBasicBlock *MBB = MI->getParent();
1415
1416    if (ImpUse && MI != ReMatDefMI) {
1417      // Re-matting an instruction with virtual register use. Update the
1418      // register interval's spill weight to HUGE_VALF to prevent it from
1419      // being spilled.
1420      LiveInterval &ImpLi = getInterval(ImpUse);
1421      ImpLi.weight = HUGE_VALF;
1422    }
1423
1424    unsigned MBBId = MBB->getNumber();
1425    unsigned ThisVReg = 0;
1426    if (TrySplit) {
1427      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1428      if (NVI != MBBVRegsMap.end()) {
1429        ThisVReg = NVI->second;
1430        // One common case:
1431        // x = use
1432        // ...
1433        // ...
1434        // def = ...
1435        //     = use
1436        // It's better to start a new interval to avoid artificially
1437        // extending the interval.
1438        if (MIHasDef && !MIHasUse) {
1439          MBBVRegsMap.erase(MBB->getNumber());
1440          ThisVReg = 0;
1441        }
1442      }
1443    }
1444
1445    bool IsNew = ThisVReg == 0;
1446    if (IsNew) {
1447      // This ends the previous live interval. If all of its def / use
1448      // can be folded, give it a low spill weight.
1449      if (NewVReg && TrySplit && AllCanFold) {
1450        LiveInterval &nI = getOrCreateInterval(NewVReg);
1451        nI.weight /= 10.0F;
1452      }
1453      AllCanFold = true;
1454    }
1455    NewVReg = ThisVReg;
1456
1457    bool HasDef = false;
1458    bool HasUse = false;
1459    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1460                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1461                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1462                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1463                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1464    if (!HasDef && !HasUse)
1465      continue;
1466
1467    AllCanFold &= CanFold;
1468
1469    // Update weight of spill interval.
1470    LiveInterval &nI = getOrCreateInterval(NewVReg);
1471    if (!TrySplit) {
1472      // The spill weight is now infinity as it cannot be spilled again.
1473      nI.weight = HUGE_VALF;
1474      continue;
1475    }
1476
1477    // Keep track of the last def and first use in each MBB.
1478    if (HasDef) {
1479      if (MI != ReMatOrigDefMI || !CanDelete) {
1480        bool HasKill = false;
1481        if (!HasUse)
1482          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1483        else {
1484          // If this is a two-address instruction, this index starts a new VNInfo.
1485          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1486          if (VNI)
1487            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1488        }
1489        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1490          SpillIdxes.find(MBBId);
1491        if (!HasKill) {
1492          if (SII == SpillIdxes.end()) {
1493            std::vector<SRInfo> S;
1494            S.push_back(SRInfo(index, NewVReg, true));
1495            SpillIdxes.insert(std::make_pair(MBBId, S));
1496          } else if (SII->second.back().vreg != NewVReg) {
1497            SII->second.push_back(SRInfo(index, NewVReg, true));
1498          } else if ((int)index > SII->second.back().index) {
1499            // If there is an earlier def and this is a two-address
1500            // instruction, then it's not possible to fold the store (which
1501            // would also fold the load).
1502            SRInfo &Info = SII->second.back();
1503            Info.index = index;
1504            Info.canFold = !HasUse;
1505          }
1506          SpillMBBs.set(MBBId);
1507        } else if (SII != SpillIdxes.end() &&
1508                   SII->second.back().vreg == NewVReg &&
1509                   (int)index > SII->second.back().index) {
1510          // There is an earlier def that's not killed (must be two-address).
1511          // The spill is no longer needed.
1512          SII->second.pop_back();
1513          if (SII->second.empty()) {
1514            SpillIdxes.erase(MBBId);
1515            SpillMBBs.reset(MBBId);
1516          }
1517        }
1518      }
1519    }
1520
1521    if (HasUse) {
1522      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1523        SpillIdxes.find(MBBId);
1524      if (SII != SpillIdxes.end() &&
1525          SII->second.back().vreg == NewVReg &&
1526          (int)index > SII->second.back().index)
1527        // Use(s) follow the last def, so it's not safe to fold the spill.
1528        SII->second.back().canFold = false;
1529      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1530        RestoreIdxes.find(MBBId);
1531      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1532        // If we are splitting live intervals, only fold if it's the first
1533        // use and there isn't another use later in the MBB.
1534        RII->second.back().canFold = false;
1535      else if (IsNew) {
1536        // Only need a reload if there isn't an earlier def / use.
1537        if (RII == RestoreIdxes.end()) {
1538          std::vector<SRInfo> Infos;
1539          Infos.push_back(SRInfo(index, NewVReg, true));
1540          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1541        } else {
1542          RII->second.push_back(SRInfo(index, NewVReg, true));
1543        }
1544        RestoreMBBs.set(MBBId);
1545      }
1546    }
1547
1548    // Update spill weight.
1549    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1550    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1551  }
1552
1553  if (NewVReg && TrySplit && AllCanFold) {
1554    // If all of its def / use can be folded, give it a low spill weight.
1555    LiveInterval &nI = getOrCreateInterval(NewVReg);
1556    nI.weight /= 10.0F;
1557  }
1558}
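
// Illustrative picture of the bookkeeping filled in above, with hypothetical
// numbers: for a block with MBBId == 3, SpillMBBs.test(3) implies
// SpillIdxes[3] holds entries such as SRInfo(52, NewVReg, true), i.e. a
// possibly foldable spill of NewVReg at instruction index 52;
// addIntervalsForSpills consumes these below when it decides where stores
// and reloads are actually emitted or folded.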
1559
1560bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1561                        BitVector &RestoreMBBs,
1562                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1563  if (!RestoreMBBs[Id])
1564    return false;
1565  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1566  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1567    if (Restores[i].index == index &&
1568        Restores[i].vreg == vr &&
1569        Restores[i].canFold)
1570      return true;
1571  return false;
1572}
1573
1574void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1575                        BitVector &RestoreMBBs,
1576                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1577  if (!RestoreMBBs[Id])
1578    return;
1579  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1580  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1581    if (Restores[i].index == index && Restores[i].vreg == vr)
1582      Restores[i].index = -1;
1583}
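
// Note that entries whose index has been reset to -1 are treated as erased:
// the restore-emission loop in addIntervalsForSpills skips any record with
// index == -1.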
1584
1585/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1586/// spilled and create empty intervals for their uses.
1587void
1588LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1589                                    const TargetRegisterClass* rc,
1590                                    std::vector<LiveInterval*> &NewLIs) {
1591  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1592         re = mri_->reg_end(); ri != re; ) {
1593    MachineOperand &O = ri.getOperand();
1594    MachineInstr *MI = &*ri;
1595    ++ri;
1596    if (O.isDef()) {
1597      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1598             "Register def was not rewritten?");
1599      RemoveMachineInstrFromMaps(MI);
1600      vrm.RemoveMachineInstrFromMaps(MI);
1601      MI->eraseFromParent();
1602    } else {
1603      // This must be a use of an implicit_def so it's not part of the live
1604      // interval. Create a new empty live interval for it.
1605      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1606      unsigned NewVReg = mri_->createVirtualRegister(rc);
1607      vrm.grow();
1608      vrm.setIsImplicitlyDefined(NewVReg);
1609      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1610      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1611        MachineOperand &MO = MI->getOperand(i);
1612        if (MO.isRegister() && MO.getReg() == li.reg)
1613          MO.setReg(NewVReg);
1614      }
1615    }
1616  }
1617}
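
// Sketch of the rewrite performed above, with hypothetical vregs:
//   %reg1024<def> = IMPLICIT_DEF         ; erased from the function
//   ...           = use %reg1024         ; operand rewritten to %reg1026
// where %reg1026 is a fresh register of the same class with an empty
// interval, marked implicitly defined in the VirtRegMap.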
1618
1619namespace {
1620  struct LISorter {
1621    bool operator()(LiveInterval* A, LiveInterval* B) {
1622      return A->beginNumber() < B->beginNumber();
1623    }
1624  };
1625}
1626
1627std::vector<LiveInterval*> LiveIntervals::
1628addIntervalsForSpillsFast(const LiveInterval &li,
1629                          const MachineLoopInfo *loopInfo,
1630                          VirtRegMap &vrm, float& SSWeight) {
1631  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1632
1633  std::vector<LiveInterval*> added;
1634
1635  assert(li.weight != HUGE_VALF &&
1636         "attempt to spill already spilled interval!");
1637
1638  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1639  DEBUG(li.dump());
1640  DOUT << '\n';
1641
1642  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1643
1644  SSWeight = 0.0f;
1645
1646  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1647  while (RI != mri_->reg_end()) {
1648    MachineInstr* MI = &*RI;
1649
1650    SmallVector<unsigned, 2> Indices;
1651    bool HasUse = false;
1652    bool HasDef = false;
1653
1654    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1655      MachineOperand& mop = MI->getOperand(i);
1656      if (!mop.isRegister() || mop.getReg() != li.reg) continue;
1657
1658      HasUse |= MI->getOperand(i).isUse();
1659      HasDef |= MI->getOperand(i).isDef();
1660
1661      Indices.push_back(i);
1662    }
1663
1664    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1665                              Indices, true, slot, li.reg)) {
1666      unsigned NewVReg = mri_->createVirtualRegister(rc);
1667      vrm.grow();
1668      vrm.assignVirt2StackSlot(NewVReg, slot);
1669
1670      // Create a new register for this spill.
1671      LiveInterval &nI = getOrCreateInterval(NewVReg);
1672
1673      // The spill weight is now infinity as it
1674      // cannot be spilled again.
1675      nI.weight = HUGE_VALF;
1676
1677      // Rewrite register operands to use the new vreg.
1678      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1679           E = Indices.end(); I != E; ++I) {
1680        MI->getOperand(*I).setReg(NewVReg);
1681
1682        if (MI->getOperand(*I).isUse())
1683          MI->getOperand(*I).setIsKill(true);
1684      }
1685
1686      // Fill in the new live interval.
1687      unsigned index = getInstructionIndex(MI);
1688      if (HasUse) {
1689        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1690                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1691        DOUT << " +" << LR;
1692        nI.addRange(LR);
1693        vrm.addRestorePoint(NewVReg, MI);
1694      }
1695      if (HasDef) {
1696        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1697                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1698        DOUT << " +" << LR;
1699        nI.addRange(LR);
1700        vrm.addSpillPoint(NewVReg, true, MI);
1701      }
1702
1703      added.push_back(&nI);
1704
1705      DOUT << "\t\t\t\tadded new interval: ";
1706      DEBUG(nI.dump());
1707      DOUT << '\n';
1708
1709      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1710      if (HasUse) {
1711        if (HasDef)
1712          SSWeight += getSpillWeight(true, true, loopDepth);
1713        else
1714          SSWeight += getSpillWeight(false, true, loopDepth);
1715      } else
1716        SSWeight += getSpillWeight(true, false, loopDepth);
1717    }
1718
1719
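    // Restart from the first remaining operand: MI's operands were either
    // folded away or rewritten to NewVReg above, so reg_begin(li.reg)
    // shrinks toward reg_end() and this loop terminates.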
1720    RI = mri_->reg_begin(li.reg);
1721  }
1722
1723  // Clients expect the new intervals to be returned in sorted order.
1724  std::sort(added.begin(), added.end(), LISorter());
1725
1726  return added;
1727}
1728
1729std::vector<LiveInterval*> LiveIntervals::
1730addIntervalsForSpills(const LiveInterval &li,
1731                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1732                      float &SSWeight) {
1733
1734  if (EnableFastSpilling)
1735    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1736
1737  assert(li.weight != HUGE_VALF &&
1738         "attempt to spill already spilled interval!");
1739
1740  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1741  li.print(DOUT, tri_);
1742  DOUT << '\n';
1743
1744  // Spill slot weight.
1745  SSWeight = 0.0f;
1746
1747  // Each bit specifies whether a spill is required in the MBB.
1748  BitVector SpillMBBs(mf_->getNumBlockIDs());
1749  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1750  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1751  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1752  DenseMap<unsigned,unsigned> MBBVRegsMap;
1753  std::vector<LiveInterval*> NewLIs;
1754  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1755
1756  unsigned NumValNums = li.getNumValNums();
1757  SmallVector<MachineInstr*, 4> ReMatDefs;
1758  ReMatDefs.resize(NumValNums, NULL);
1759  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1760  ReMatOrigDefs.resize(NumValNums, NULL);
1761  SmallVector<int, 4> ReMatIds;
1762  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1763  BitVector ReMatDelete(NumValNums);
1764  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1765
1766  // Spilling a split live interval. It cannot be split any further. It's
1767  // also guaranteed to be a single val# / range interval.
1768  if (vrm.getPreSplitReg(li.reg)) {
1769    vrm.setIsSplitFromReg(li.reg, 0);
1770    // Unset the split kill marker on the last use.
1771    unsigned KillIdx = vrm.getKillPoint(li.reg);
1772    if (KillIdx) {
1773      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1774      assert(KillMI && "Last use disappeared?");
1775      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1776      assert(KillOp != -1 && "Last use disappeared?");
1777      KillMI->getOperand(KillOp).setIsKill(false);
1778    }
1779    vrm.removeKillPoint(li.reg);
1780    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1781    Slot = vrm.getStackSlot(li.reg);
1782    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1783    MachineInstr *ReMatDefMI = DefIsReMat ?
1784      vrm.getReMaterializedMI(li.reg) : NULL;
1785    int LdSlot = 0;
1786    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1787    bool isLoad = isLoadSS ||
1788      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1789    bool IsFirstRange = true;
1790    for (LiveInterval::Ranges::const_iterator
1791           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1792      // If this is a split live interval with multiple ranges, it means there
1793      // are two-address instructions that re-defined the value. Only the
1794      // first def can be rematerialized!
1795      if (IsFirstRange) {
1796        // Note ReMatOrigDefMI has already been deleted.
1797        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1798                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1799                             false, vrm, rc, ReMatIds, loopInfo,
1800                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1801                             MBBVRegsMap, NewLIs, SSWeight);
1802      } else {
1803        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1804                             Slot, 0, false, false, false,
1805                             false, vrm, rc, ReMatIds, loopInfo,
1806                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1807                             MBBVRegsMap, NewLIs, SSWeight);
1808      }
1809      IsFirstRange = false;
1810    }
1811
1812    SSWeight = 0.0f;  // Already accounted for when split.
1813    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1814    return NewLIs;
1815  }
1816
1817  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1818  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1819    TrySplit = false;
1820  if (TrySplit)
1821    ++numSplits;
1822  bool NeedStackSlot = false;
1823  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1824       i != e; ++i) {
1825    const VNInfo *VNI = *i;
1826    unsigned VN = VNI->id;
1827    unsigned DefIdx = VNI->def;
1828    if (DefIdx == ~1U)
1829      continue; // Dead val#.
1830    // Is the def for the val# rematerializable?
1831    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1832      ? 0 : getInstructionFromIndex(DefIdx);
1833    bool dummy;
1834    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1835      // Remember how to remat the def of this val#.
1836      ReMatOrigDefs[VN] = ReMatDefMI;
1837      // Original def may be modified so we have to make a copy here.
1838      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1839      ClonedMIs.push_back(Clone);
1840      ReMatDefs[VN] = Clone;
1841
1842      bool CanDelete = true;
1843      if (VNI->hasPHIKill) {
1844        // A kill is a phi node; not all of its uses can be rematerialized,
1845        // so it must not be deleted.
1846        CanDelete = false;
1847        // Need a stack slot if there is any live range where uses cannot be
1848        // rematerialized.
1849        NeedStackSlot = true;
1850      }
1851      if (CanDelete)
1852        ReMatDelete.set(VN);
1853    } else {
1854      // Need a stack slot if there is any live range where uses cannot be
1855      // rematerialized.
1856      NeedStackSlot = true;
1857    }
1858  }
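
  // At this point, illustratively: for each val# VN whose def was found
  // rematerializable, ReMatOrigDefs[VN] points at the original def,
  // ReMatDefs[VN] at a private clone that survives later edits, and
  // ReMatDelete[VN] is set when every use can be rematted and the original
  // def may therefore be deleted.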
1859
1860  // One stack slot per live interval.
1861  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1862    Slot = vrm.assignVirt2StackSlot(li.reg);
1863
1864  // Create new intervals and rewrite defs and uses.
1865  for (LiveInterval::Ranges::const_iterator
1866         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1867    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1868    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1869    bool DefIsReMat = ReMatDefMI != NULL;
1870    bool CanDelete = ReMatDelete[I->valno->id];
1871    int LdSlot = 0;
1872    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1873    bool isLoad = isLoadSS ||
1874      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1875    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1876                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1877                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1878                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1879                               MBBVRegsMap, NewLIs, SSWeight);
1880  }
1881
1882  // Insert spills / restores if we are splitting.
1883  if (!TrySplit) {
1884    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1885    return NewLIs;
1886  }
1887
1888  SmallPtrSet<LiveInterval*, 4> AddedKill;
1889  SmallVector<unsigned, 2> Ops;
1890  if (NeedStackSlot) {
1891    int Id = SpillMBBs.find_first();
1892    while (Id != -1) {
1893      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1894      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1895      std::vector<SRInfo> &spills = SpillIdxes[Id];
1896      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1897        int index = spills[i].index;
1898        unsigned VReg = spills[i].vreg;
1899        LiveInterval &nI = getOrCreateInterval(VReg);
1900        bool isReMat = vrm.isReMaterialized(VReg);
1901        MachineInstr *MI = getInstructionFromIndex(index);
1902        bool CanFold = false;
1903        bool FoundUse = false;
1904        Ops.clear();
1905        if (spills[i].canFold) {
1906          CanFold = true;
1907          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1908            MachineOperand &MO = MI->getOperand(j);
1909            if (!MO.isRegister() || MO.getReg() != VReg)
1910              continue;
1911
1912            Ops.push_back(j);
1913            if (MO.isDef())
1914              continue;
1915            if (isReMat ||
1916                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1917                                                RestoreMBBs, RestoreIdxes))) {
1918              // MI has two-address uses of the same register. If the use
1919              // isn't the first and only use in the BB, then we can't fold
1920              // it. FIXME: Move this to rewriteInstructionsForSpills.
1921              CanFold = false;
1922              break;
1923            }
1924            FoundUse = true;
1925          }
1926        }
1927        // Fold the store into the def if possible.
1928        bool Folded = false;
1929        if (CanFold && !Ops.empty()) {
1930          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1931            Folded = true;
1932            if (FoundUse) {
1933              // Also folded uses, do not issue a load.
1934              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1935              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1936            }
1937            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1938          }
1939        }
1940
1941        // Otherwise tell the spiller to issue a spill.
1942        if (!Folded) {
1943          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1944          bool isKill = LR->end == getStoreIndex(index);
1945          if (!MI->registerDefIsDead(nI.reg))
1946            // No need to spill a dead def.
1947            vrm.addSpillPoint(VReg, isKill, MI);
1948          if (isKill)
1949            AddedKill.insert(&nI);
1950        }
1951
1952        // Update spill slot weight.
1953        if (!isReMat)
1954          SSWeight += getSpillWeight(true, false, loopDepth);
1955      }
1956      Id = SpillMBBs.find_next(Id);
1957    }
1958  }
1959
1960  int Id = RestoreMBBs.find_first();
1961  while (Id != -1) {
1962    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1963    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1964
1965    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1966    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1967      int index = restores[i].index;
1968      if (index == -1)
1969        continue;
1970      unsigned VReg = restores[i].vreg;
1971      LiveInterval &nI = getOrCreateInterval(VReg);
1972      bool isReMat = vrm.isReMaterialized(VReg);
1973      MachineInstr *MI = getInstructionFromIndex(index);
1974      bool CanFold = false;
1975      Ops.clear();
1976      if (restores[i].canFold) {
1977        CanFold = true;
1978        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1979          MachineOperand &MO = MI->getOperand(j);
1980          if (!MO.isRegister() || MO.getReg() != VReg)
1981            continue;
1982
1983          if (MO.isDef()) {
1984            // If this restore were to be folded, it would have been folded
1985            // already.
1986            CanFold = false;
1987            break;
1988          }
1989          Ops.push_back(j);
1990        }
1991      }
1992
1993      // Fold the load into the use if possible.
1994      bool Folded = false;
1995      if (CanFold && !Ops.empty()) {
1996        if (!isReMat)
1997          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1998        else {
1999          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2000          int LdSlot = 0;
2001          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2002          // If the rematerializable def is a load, also try to fold it.
2003          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2004            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2005                                          Ops, isLoadSS, LdSlot, VReg);
2006          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2007          if (ImpUse) {
2008            // Re-matting an instruction with virtual register use. Add the
2009            // register as an implicit use on the use MI and update the register
2010            // interval's spill weight to HUGE_VALF to prevent it from being
2011            // spilled.
2012            LiveInterval &ImpLi = getInterval(ImpUse);
2013            ImpLi.weight = HUGE_VALF;
2014            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2015          }
2016        }
2017      }
2018      // If folding is not possible / failed, then tell the spiller to issue a
2019      // load / rematerialization for us.
2020      if (Folded)
2021        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2022      else
2023        vrm.addRestorePoint(VReg, MI);
2024
2025      // Update spill slot weight.
2026      if (!isReMat)
2027        SSWeight += getSpillWeight(false, true, loopDepth);
2028    }
2029    Id = RestoreMBBs.find_next(Id);
2030  }
2031
2032  // Finalize intervals: add kills, finalize spill weights, and filter out
2033  // dead intervals.
2034  std::vector<LiveInterval*> RetNewLIs;
2035  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2036    LiveInterval *LI = NewLIs[i];
2037    if (!LI->empty()) {
2038      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2039      if (!AddedKill.count(LI)) {
2040        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2041        unsigned LastUseIdx = getBaseIndex(LR->end);
2042        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2043        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2044        assert(UseIdx != -1);
2045        if (LastUse->getOperand(UseIdx).isImplicit() ||
2046            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2047          LastUse->getOperand(UseIdx).setIsKill();
2048          vrm.addKillPoint(LI->reg, LastUseIdx);
2049        }
2050      }
2051      RetNewLIs.push_back(LI);
2052    }
2053  }
2054
2055  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2056  return RetNewLIs;
2057}
2058
2059/// hasAllocatableSuperReg - Return true if the specified physical register has
2060/// any super register that's allocatable.
2061bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2062  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2063    if (allocatableRegs_[*AS] && hasInterval(*AS))
2064      return true;
2065  return false;
2066}
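
// For example (target-specific and illustrative only): querying X86's AL
// would return true when a super-register such as AX or EAX is allocatable
// and currently has a live interval.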
2067
2068/// getRepresentativeReg - Find the largest super register of the specified
2069/// physical register.
2070unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2071  // Find the largest super-register that is allocatable.
2072  unsigned BestReg = Reg;
2073  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2074    unsigned SuperReg = *AS;
2075    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2076      BestReg = SuperReg;
2077      break;
2078    }
2079  }
2080  return BestReg;
2081}
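
// Continuing the illustrative X86 example: getRepresentativeReg(AL) walks
// AL's super-registers and, assuming EAX has a live interval and no
// allocatable super-register of its own, settles on EAX.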
2082
2083/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2084/// specified interval that conflict with the specified physical register.
2085unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2086                                                   unsigned PhysReg) const {
2087  unsigned NumConflicts = 0;
2088  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2089  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2090         E = mri_->reg_end(); I != E; ++I) {
2091    MachineOperand &O = I.getOperand();
2092    MachineInstr *MI = O.getParent();
2093    unsigned Index = getInstructionIndex(MI);
2094    if (pli.liveAt(Index))
2095      ++NumConflicts;
2096  }
2097  return NumConflicts;
2098}
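
// Worked example with hypothetical indices: if li.reg appears in
// instructions at indexes 12, 20 and 36, and the representative physreg
// interval is live at 20 and 36 only, this returns 2.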
2099
2100/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2101/// around all defs and uses of the specified interval.
2102void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2103                                            unsigned PhysReg, VirtRegMap &vrm) {
2104  unsigned SpillReg = getRepresentativeReg(PhysReg);
2105
2106  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2107    // If there is an allocatable register which aliases PhysReg but is
2108    // not a sub-register of the chosen representative super register,
2109    // assert, since we can't handle it yet.
2110    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2111           tri_->isSuperRegister(*AS, SpillReg));
2112
2113  LiveInterval &pli = getInterval(SpillReg);
2114  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2115  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2116         E = mri_->reg_end(); I != E; ++I) {
2117    MachineOperand &O = I.getOperand();
2118    MachineInstr *MI = O.getParent();
2119    if (SeenMIs.count(MI))
2120      continue;
2121    SeenMIs.insert(MI);
2122    unsigned Index = getInstructionIndex(MI);
2123    if (pli.liveAt(Index)) {
2124      vrm.addEmergencySpill(SpillReg, MI);
2125      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2126      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2127        if (!hasInterval(*AS))
2128          continue;
2129        LiveInterval &spli = getInterval(*AS);
2130        if (spli.liveAt(Index))
2131          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2132      }
2133    }
2134  }
2135}
2136
2137LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2138                                                   MachineInstr* startInst) {
2139  LiveInterval& Interval = getOrCreateInterval(reg);
2140  VNInfo* VN = Interval.getNextValue(
2141            getInstructionIndex(startInst) + InstrSlots::DEF,
2142            startInst, getVNInfoAllocator());
2143  VN->hasPHIKill = true;
2144  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2145  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2146               getMBBEndIdx(startInst->getParent()) + 1, VN);
2147  Interval.addRange(LR);
2148
2149  return LR;
2150}
2151
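// A minimal usage sketch (hypothetical caller and names): a pass that
// materializes a value feeding a PHI can extend the register to the end of
// its defining block like so:
//   LiveRange LR = LIS->addLiveRangeToEndOfBlock(Reg, DefMI);
// The returned range covers [DefMI's def slot, end of DefMI's block + 1),
// and its val# is marked with a PHI kill at the block boundary.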