LiveIntervalAnalysis.cpp revision feddecd36ac9e9e7f716f9cb3c2c4042a9d973e1
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

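// A reading aid for the index arithmetic below, assuming the InstrSlots
// layout declared in LiveIntervalAnalysis.h (LOAD=0, USE=1, DEF=2, STORE=3,
// NUM=4): each instruction owns NUM consecutive indices, so for an index
// Idx, Idx / InstrSlots::NUM identifies the instruction slot and
// Idx % InstrSlots::NUM the sub-slot within it. For example, index 14
// decomposes as slot 14/4 == 3 with offset 14%4 == 2, i.e. the DEF sub-slot
// of the fourth slot. The empty (NULL) i2miMap_ entries pushed below leave
// gaps so instructions inserted later can be numbered without renumbering
// the whole function.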
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the lifetime of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
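      // A sketch of the situation (hypothetical vreg numbers): after
      // two-address conversion we might have
      //   A: %reg1024 = MOV %reg1025     ; originally value #0's def
      //   B: %reg1024 = ADD %reg1024, x  ; the redefinition seen here
      // Below, [A, B) becomes a new value #1 defined at A, while value #0
      // is re-pointed at the redefining instruction B.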
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = start + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills. If the register reaches a def before it's killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

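  // Operand layouts assumed here (the usual forms of these pseudo-ops):
  //   dst = EXTRACT_SUBREG src, subidx        -> source reg is operand 1
  //   dst = INSERT_SUBREG base, src, subidx   -> inserted reg is operand 2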
  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember that this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegister()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanism currently supports.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG, and both target registers would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
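    // (getSpillWeight is, roughly, (HasDef + HasUse) * 10^loopDepth, so
    // accesses inside deep loops dominate the accumulated slot weight.)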

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
1388    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
1398      continue;
1399    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1400  }
1401  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1402
1403  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1404  // Now rewrite the defs and uses.
1405  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1406    RewriteInfo &rwi = RewriteMIs[i];
1407    ++i;
1408    unsigned index = rwi.Index;
1409    bool MIHasUse = rwi.HasUse;
1410    bool MIHasDef = rwi.HasDef;
1411    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
1414    unsigned NumUses = MIHasUse;
1415    while (i != e && RewriteMIs[i].MI == MI) {
1416      assert(RewriteMIs[i].Index == index);
1417      bool isUse = RewriteMIs[i].HasUse;
1418      if (isUse) ++NumUses;
1419      MIHasUse |= isUse;
1420      MIHasDef |= RewriteMIs[i].HasDef;
1421      ++i;
1422    }
1423    MachineBasicBlock *MBB = MI->getParent();
1424
1425    if (ImpUse && MI != ReMatDefMI) {
1426      // Re-matting an instruction with virtual register use. Update the
1427      // register interval's spill weight to HUGE_VALF to prevent it from
1428      // being spilled.
1429      LiveInterval &ImpLi = getInterval(ImpUse);
1430      ImpLi.weight = HUGE_VALF;
1431    }
1432
1433    unsigned MBBId = MBB->getNumber();
1434    unsigned ThisVReg = 0;
1435    if (TrySplit) {
1436      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1437      if (NVI != MBBVRegsMap.end()) {
1438        ThisVReg = NVI->second;
1439        // One common case:
1440        // x = use
1441        // ...
1442        // ...
1443        // def = ...
1444        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the current one.
1447        if (MIHasDef && !MIHasUse) {
1448          MBBVRegsMap.erase(MBB->getNumber());
1449          ThisVReg = 0;
1450        }
1451      }
1452    }
1453
1454    bool IsNew = ThisVReg == 0;
1455    if (IsNew) {
1456      // This ends the previous live interval. If all of its def / use
1457      // can be folded, give it a low spill weight.
1458      if (NewVReg && TrySplit && AllCanFold) {
1459        LiveInterval &nI = getOrCreateInterval(NewVReg);
1460        nI.weight /= 10.0F;
1461      }
1462      AllCanFold = true;
1463    }
1464    NewVReg = ThisVReg;
1465
1466    bool HasDef = false;
1467    bool HasUse = false;
1468    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1469                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1470                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1471                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1472                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1473    if (!HasDef && !HasUse)
1474      continue;
1475
1476    AllCanFold &= CanFold;
1477
1478    // Update weight of spill interval.
1479    LiveInterval &nI = getOrCreateInterval(NewVReg);
1480    if (!TrySplit) {
1481      // The spill weight is now infinity as it cannot be spilled again.
1482      nI.weight = HUGE_VALF;
1483      continue;
1484    }
1485
1486    // Keep track of the last def and first use in each MBB.
1487    if (HasDef) {
1488      if (MI != ReMatOrigDefMI || !CanDelete) {
1489        bool HasKill = false;
1490        if (!HasUse)
1491          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1492        else {
          // If this is two-address code, then this index starts a new VNInfo.
1494          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1495          if (VNI)
1496            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1497        }
1498        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1499          SpillIdxes.find(MBBId);
1500        if (!HasKill) {
1501          if (SII == SpillIdxes.end()) {
1502            std::vector<SRInfo> S;
1503            S.push_back(SRInfo(index, NewVReg, true));
1504            SpillIdxes.insert(std::make_pair(MBBId, S));
1505          } else if (SII->second.back().vreg != NewVReg) {
1506            SII->second.push_back(SRInfo(index, NewVReg, true));
1507          } else if ((int)index > SII->second.back().index) {
1508            // If there is an earlier def and this is a two-address
1509            // instruction, then it's not possible to fold the store (which
1510            // would also fold the load).
1511            SRInfo &Info = SII->second.back();
1512            Info.index = index;
1513            Info.canFold = !HasUse;
1514          }
1515          SpillMBBs.set(MBBId);
1516        } else if (SII != SpillIdxes.end() &&
1517                   SII->second.back().vreg == NewVReg &&
1518                   (int)index > SII->second.back().index) {
1519          // There is an earlier def that's not killed (must be two-address).
1520          // The spill is no longer needed.
1521          SII->second.pop_back();
1522          if (SII->second.empty()) {
1523            SpillIdxes.erase(MBBId);
1524            SpillMBBs.reset(MBBId);
1525          }
1526        }
1527      }
1528    }
1529
1530    if (HasUse) {
1531      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1532        SpillIdxes.find(MBBId);
1533      if (SII != SpillIdxes.end() &&
1534          SII->second.back().vreg == NewVReg &&
1535          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold the spill.
1537        SII->second.back().canFold = false;
1538      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1539        RestoreIdxes.find(MBBId);
1540      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1541        // If we are splitting live intervals, only fold if it's the first
1542        // use and there isn't another use later in the MBB.
1543        RII->second.back().canFold = false;
1544      else if (IsNew) {
1545        // Only need a reload if there isn't an earlier def / use.
1546        if (RII == RestoreIdxes.end()) {
1547          std::vector<SRInfo> Infos;
1548          Infos.push_back(SRInfo(index, NewVReg, true));
1549          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1550        } else {
1551          RII->second.push_back(SRInfo(index, NewVReg, true));
1552        }
1553        RestoreMBBs.set(MBBId);
1554      }
1555    }
1556
1557    // Update spill weight.
1558    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1559    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1560  }
1561
1562  if (NewVReg && TrySplit && AllCanFold) {
1563    // If all of its def / use can be folded, give it a low spill weight.
1564    LiveInterval &nI = getOrCreateInterval(NewVReg);
1565    nI.weight /= 10.0F;
1566  }
1567}
1568
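/// alsoFoldARestore - Return true if block Id has a restore of the given
/// virtual register at the given index that is still marked foldable.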
1569bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1570                        BitVector &RestoreMBBs,
1571                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1572  if (!RestoreMBBs[Id])
1573    return false;
1574  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1575  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1576    if (Restores[i].index == index &&
1577        Restores[i].vreg == vr &&
1578        Restores[i].canFold)
1579      return true;
1580  return false;
1581}
1582
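/// eraseRestoreInfo - Invalidate the recorded restore of the given virtual
/// register at the given index in block Id; the reload is no longer needed.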
1583void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1584                        BitVector &RestoreMBBs,
1585                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1586  if (!RestoreMBBs[Id])
1587    return;
1588  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1589  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
1591      Restores[i].index = -1;
1592}
1593
1594/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1595/// spilled and create empty intervals for their uses.
1596void
1597LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1598                                    const TargetRegisterClass* rc,
1599                                    std::vector<LiveInterval*> &NewLIs) {
1600  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1601         re = mri_->reg_end(); ri != re; ) {
1602    MachineOperand &O = ri.getOperand();
1603    MachineInstr *MI = &*ri;
1604    ++ri;
1605    if (O.isDef()) {
1606      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1607             "Register def was not rewritten?");
1608      RemoveMachineInstrFromMaps(MI);
1609      vrm.RemoveMachineInstrFromMaps(MI);
1610      MI->eraseFromParent();
1611    } else {
      // This must be a use of an implicit_def so it's not part of the live
1613      // interval. Create a new empty live interval for it.
1614      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1615      unsigned NewVReg = mri_->createVirtualRegister(rc);
1616      vrm.grow();
1617      vrm.setIsImplicitlyDefined(NewVReg);
1618      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1619      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1620        MachineOperand &MO = MI->getOperand(i);
1621        if (MO.isRegister() && MO.getReg() == li.reg)
1622          MO.setReg(NewVReg);
1623      }
1624    }
1625  }
1626}
1627
1628namespace {
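  // LISorter - Order live intervals by ascending start index.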
1629  struct LISorter {
1630    bool operator()(LiveInterval* A, LiveInterval* B) {
1631      return A->beginNumber() < B->beginNumber();
1632    }
1633  };
1634}
1635
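/// addIntervalsForSpillsFast - Simplified spilling path used when fast
/// spilling is enabled: assign a single stack slot, and for each instruction
/// referencing the register either fold the memory operand or create a short
/// new interval around that instruction.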
1636std::vector<LiveInterval*> LiveIntervals::
1637addIntervalsForSpillsFast(const LiveInterval &li,
1638                          const MachineLoopInfo *loopInfo,
1639                          VirtRegMap &vrm, float& SSWeight) {
1640  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1641
1642  std::vector<LiveInterval*> added;
1643
1644  assert(li.weight != HUGE_VALF &&
1645         "attempt to spill already spilled interval!");
1646
1647  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1648  DEBUG(li.dump());
1649  DOUT << '\n';
1650
1651  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1652
1653  SSWeight = 0.0f;
1654
1655  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1656  while (RI != mri_->reg_end()) {
1657    MachineInstr* MI = &*RI;
1658
1659    SmallVector<unsigned, 2> Indices;
1660    bool HasUse = false;
1661    bool HasDef = false;
1662
1663    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1664      MachineOperand& mop = MI->getOperand(i);
1665      if (!mop.isRegister() || mop.getReg() != li.reg) continue;
1666
1667      HasUse |= MI->getOperand(i).isUse();
1668      HasDef |= MI->getOperand(i).isDef();
1669
1670      Indices.push_back(i);
1671    }
1672
1673    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1674                              Indices, true, slot, li.reg)) {
1675      unsigned NewVReg = mri_->createVirtualRegister(rc);
1676      vrm.grow();
1677      vrm.assignVirt2StackSlot(NewVReg, slot);
1678
      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
1685
1686      // Rewrite register operands to use the new vreg.
1687      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1688           E = Indices.end(); I != E; ++I) {
1689        MI->getOperand(*I).setReg(NewVReg);
1690
1691        if (MI->getOperand(*I).isUse())
1692          MI->getOperand(*I).setIsKill(true);
1693      }
1694
      // Fill in the new live interval.
1696      unsigned index = getInstructionIndex(MI);
1697      if (HasUse) {
1698        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1699                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1700        DOUT << " +" << LR;
1701        nI.addRange(LR);
1702        vrm.addRestorePoint(NewVReg, MI);
1703      }
1704      if (HasDef) {
1705        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1706                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1707        DOUT << " +" << LR;
1708        nI.addRange(LR);
1709        vrm.addSpillPoint(NewVReg, true, MI);
1710      }
1711
1712      added.push_back(&nI);
1713
1714      DOUT << "\t\t\t\tadded new interval: ";
1715      DEBUG(nI.dump());
1716      DOUT << '\n';
1717
1718      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1719      if (HasUse) {
1720        if (HasDef)
1721          SSWeight += getSpillWeight(true, true, loopDepth);
1722        else
1723          SSWeight += getSpillWeight(false, true, loopDepth);
1724      } else
1725        SSWeight += getSpillWeight(true, false, loopDepth);
1726    }
1727
1729    RI = mri_->reg_begin(li.reg);
1730  }
1731
1732  // Clients expect the new intervals to be returned in sorted order.
1733  std::sort(added.begin(), added.end(), LISorter());
1734
1735  return added;
1736}
1737
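/// addIntervalsForSpills - Create new live intervals for the spilled value,
/// rematerializing defs where possible, splitting at basic block boundaries
/// when profitable, and recording spill / restore points for the spiller.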
1738std::vector<LiveInterval*> LiveIntervals::
1739addIntervalsForSpills(const LiveInterval &li,
1740                      SmallVectorImpl<LiveInterval*> &SpillIs,
1741                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1742                      float &SSWeight) {
1743
1744  if (EnableFastSpilling)
1745    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1746
1747  assert(li.weight != HUGE_VALF &&
1748         "attempt to spill already spilled interval!");
1749
1750  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1751  li.print(DOUT, tri_);
1752  DOUT << '\n';
1753
1754  // Spill slot weight.
1755  SSWeight = 0.0f;
1756
  // Each bit specifies whether a spill is required in the MBB.
1758  BitVector SpillMBBs(mf_->getNumBlockIDs());
1759  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1760  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1761  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1762  DenseMap<unsigned,unsigned> MBBVRegsMap;
1763  std::vector<LiveInterval*> NewLIs;
1764  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1765
1766  unsigned NumValNums = li.getNumValNums();
1767  SmallVector<MachineInstr*, 4> ReMatDefs;
1768  ReMatDefs.resize(NumValNums, NULL);
1769  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1770  ReMatOrigDefs.resize(NumValNums, NULL);
1771  SmallVector<int, 4> ReMatIds;
1772  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1773  BitVector ReMatDelete(NumValNums);
1774  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1775
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
1778  if (vrm.getPreSplitReg(li.reg)) {
1779    vrm.setIsSplitFromReg(li.reg, 0);
1780    // Unset the split kill marker on the last use.
1781    unsigned KillIdx = vrm.getKillPoint(li.reg);
1782    if (KillIdx) {
1783      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1784      assert(KillMI && "Last use disappeared?");
1785      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1786      assert(KillOp != -1 && "Last use disappeared?");
1787      KillMI->getOperand(KillOp).setIsKill(false);
1788    }
1789    vrm.removeKillPoint(li.reg);
1790    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1791    Slot = vrm.getStackSlot(li.reg);
1792    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1793    MachineInstr *ReMatDefMI = DefIsReMat ?
1794      vrm.getReMaterializedMI(li.reg) : NULL;
1795    int LdSlot = 0;
1796    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1797    bool isLoad = isLoadSS ||
1798      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1799    bool IsFirstRange = true;
1800    for (LiveInterval::Ranges::const_iterator
1801           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1802      // If this is a split live interval with multiple ranges, it means there
1803      // are two-address instructions that re-defined the value. Only the
1804      // first def can be rematerialized!
1805      if (IsFirstRange) {
1806        // Note ReMatOrigDefMI has already been deleted.
1807        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1808                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1809                             false, vrm, rc, ReMatIds, loopInfo,
1810                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1811                             MBBVRegsMap, NewLIs, SSWeight);
1812      } else {
1813        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1814                             Slot, 0, false, false, false,
1815                             false, vrm, rc, ReMatIds, loopInfo,
1816                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1817                             MBBVRegsMap, NewLIs, SSWeight);
1818      }
1819      IsFirstRange = false;
1820    }
1821
1822    SSWeight = 0.0f;  // Already accounted for when split.
1823    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1824    return NewLIs;
1825  }
1826
1827  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1828  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1829    TrySplit = false;
1830  if (TrySplit)
1831    ++numSplits;
1832  bool NeedStackSlot = false;
1833  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1834       i != e; ++i) {
1835    const VNInfo *VNI = *i;
1836    unsigned VN = VNI->id;
1837    unsigned DefIdx = VNI->def;
1838    if (DefIdx == ~1U)
1839      continue; // Dead val#.
1840    // Is the def for the val# rematerializable?
1841    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1842      ? 0 : getInstructionFromIndex(DefIdx);
1843    bool dummy;
1844    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1845      // Remember how to remat the def of this val#.
1846      ReMatOrigDefs[VN] = ReMatDefMI;
1847      // Original def may be modified so we have to make a copy here.
1848      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1849      ClonedMIs.push_back(Clone);
1850      ReMatDefs[VN] = Clone;
1851
1852      bool CanDelete = true;
1853      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
1856        CanDelete = false;
1857        // Need a stack slot if there is any live range where uses cannot be
1858        // rematerialized.
1859        NeedStackSlot = true;
1860      }
1861      if (CanDelete)
1862        ReMatDelete.set(VN);
1863    } else {
1864      // Need a stack slot if there is any live range where uses cannot be
1865      // rematerialized.
1866      NeedStackSlot = true;
1867    }
1868  }
1869
1870  // One stack slot per live interval.
1871  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1872    Slot = vrm.assignVirt2StackSlot(li.reg);
1873
1874  // Create new intervals and rewrite defs and uses.
1875  for (LiveInterval::Ranges::const_iterator
1876         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1877    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1878    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1879    bool DefIsReMat = ReMatDefMI != NULL;
1880    bool CanDelete = ReMatDelete[I->valno->id];
1881    int LdSlot = 0;
1882    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1883    bool isLoad = isLoadSS ||
1884      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1885    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1886                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1887                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1888                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1889                               MBBVRegsMap, NewLIs, SSWeight);
1890  }
1891
1892  // Insert spills / restores if we are splitting.
1893  if (!TrySplit) {
1894    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1895    return NewLIs;
1896  }
1897
1898  SmallPtrSet<LiveInterval*, 4> AddedKill;
1899  SmallVector<unsigned, 2> Ops;
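  // Insert the spills first: for each block that needs one, either fold the
  // store into the defining instruction or record a spill point for the
  // spiller.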
1900  if (NeedStackSlot) {
1901    int Id = SpillMBBs.find_first();
1902    while (Id != -1) {
1903      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1904      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1905      std::vector<SRInfo> &spills = SpillIdxes[Id];
1906      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1907        int index = spills[i].index;
1908        unsigned VReg = spills[i].vreg;
1909        LiveInterval &nI = getOrCreateInterval(VReg);
1910        bool isReMat = vrm.isReMaterialized(VReg);
1911        MachineInstr *MI = getInstructionFromIndex(index);
1912        bool CanFold = false;
1913        bool FoundUse = false;
1914        Ops.clear();
1915        if (spills[i].canFold) {
1916          CanFold = true;
1917          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1918            MachineOperand &MO = MI->getOperand(j);
1919            if (!MO.isRegister() || MO.getReg() != VReg)
1920              continue;
1921
1922            Ops.push_back(j);
1923            if (MO.isDef())
1924              continue;
1925            if (isReMat ||
1926                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1927                                                RestoreMBBs, RestoreIdxes))) {
1928              // MI has two-address uses of the same register. If the use
1929              // isn't the first and only use in the BB, then we can't fold
1930              // it. FIXME: Move this to rewriteInstructionsForSpills.
1931              CanFold = false;
1932              break;
1933            }
1934            FoundUse = true;
1935          }
1936        }
1937        // Fold the store into the def if possible.
1938        bool Folded = false;
1939        if (CanFold && !Ops.empty()) {
1940          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1941            Folded = true;
            if (FoundUse) {
1943              // Also folded uses, do not issue a load.
1944              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1945              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1946            }
1947            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1948          }
1949        }
1950
1951        // Otherwise tell the spiller to issue a spill.
1952        if (!Folded) {
1953          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1954          bool isKill = LR->end == getStoreIndex(index);
1955          if (!MI->registerDefIsDead(nI.reg))
1956            // No need to spill a dead def.
1957            vrm.addSpillPoint(VReg, isKill, MI);
1958          if (isKill)
1959            AddedKill.insert(&nI);
1960        }
1961
1962        // Update spill slot weight.
1963        if (!isReMat)
1964          SSWeight += getSpillWeight(true, false, loopDepth);
1965      }
1966      Id = SpillMBBs.find_next(Id);
1967    }
1968  }
1969
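  // Now insert the restores: fold the reload (or the rematerialized load)
  // into the using instruction when possible; otherwise record a restore
  // point for the spiller.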
1970  int Id = RestoreMBBs.find_first();
1971  while (Id != -1) {
1972    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1973    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1974
1975    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1976    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1977      int index = restores[i].index;
1978      if (index == -1)
1979        continue;
1980      unsigned VReg = restores[i].vreg;
1981      LiveInterval &nI = getOrCreateInterval(VReg);
1982      bool isReMat = vrm.isReMaterialized(VReg);
1983      MachineInstr *MI = getInstructionFromIndex(index);
1984      bool CanFold = false;
1985      Ops.clear();
1986      if (restores[i].canFold) {
1987        CanFold = true;
1988        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1989          MachineOperand &MO = MI->getOperand(j);
1990          if (!MO.isRegister() || MO.getReg() != VReg)
1991            continue;
1992
1993          if (MO.isDef()) {
1994            // If this restore were to be folded, it would have been folded
1995            // already.
1996            CanFold = false;
1997            break;
1998          }
1999          Ops.push_back(j);
2000        }
2001      }
2002
2003      // Fold the load into the use if possible.
2004      bool Folded = false;
2005      if (CanFold && !Ops.empty()) {
2006        if (!isReMat)
2007          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2008        else {
2009          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2010          int LdSlot = 0;
2011          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2012          // If the rematerializable def is a load, also try to fold it.
2013          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2014            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2015                                          Ops, isLoadSS, LdSlot, VReg);
2016          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2017          if (ImpUse) {
2018            // Re-matting an instruction with virtual register use. Add the
2019            // register as an implicit use on the use MI and update the register
2020            // interval's spill weight to HUGE_VALF to prevent it from being
2021            // spilled.
2022            LiveInterval &ImpLi = getInterval(ImpUse);
2023            ImpLi.weight = HUGE_VALF;
2024            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2025          }
2026        }
2027      }
2028      // If folding is not possible / failed, then tell the spiller to issue a
2029      // load / rematerialization for us.
2030      if (Folded)
2031        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2032      else
2033        vrm.addRestorePoint(VReg, MI);
2034
2035      // Update spill slot weight.
2036      if (!isReMat)
2037        SSWeight += getSpillWeight(false, true, loopDepth);
2038    }
2039    Id = RestoreMBBs.find_next(Id);
2040  }
2041
2042  // Finalize intervals: add kills, finalize spill weights, and filter out
2043  // dead intervals.
2044  std::vector<LiveInterval*> RetNewLIs;
2045  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2046    LiveInterval *LI = NewLIs[i];
2047    if (!LI->empty()) {
2048      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2049      if (!AddedKill.count(LI)) {
2050        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2051        unsigned LastUseIdx = getBaseIndex(LR->end);
2052        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2053        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2054        assert(UseIdx != -1);
2055        if (LastUse->getOperand(UseIdx).isImplicit() ||
2056            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2057          LastUse->getOperand(UseIdx).setIsKill();
2058          vrm.addKillPoint(LI->reg, LastUseIdx);
2059        }
2060      }
2061      RetNewLIs.push_back(LI);
2062    }
2063  }
2064
2065  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2066  return RetNewLIs;
2067}
2068
2069/// hasAllocatableSuperReg - Return true if the specified physical register has
2070/// any super register that's allocatable.
2071bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2072  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2073    if (allocatableRegs_[*AS] && hasInterval(*AS))
2074      return true;
2075  return false;
2076}
2077
2078/// getRepresentativeReg - Find the largest super register of the specified
2079/// physical register.
2080unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2081  // Find the largest super-register that is allocatable.
2082  unsigned BestReg = Reg;
2083  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2084    unsigned SuperReg = *AS;
2085    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2086      BestReg = SuperReg;
2087      break;
2088    }
2089  }
2090  return BestReg;
2091}
2092
2093/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2094/// specified interval that conflicts with the specified physical register.
2095unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2096                                                   unsigned PhysReg) const {
2097  unsigned NumConflicts = 0;
2098  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2099  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2100         E = mri_->reg_end(); I != E; ++I) {
2101    MachineOperand &O = I.getOperand();
2102    MachineInstr *MI = O.getParent();
2103    unsigned Index = getInstructionIndex(MI);
2104    if (pli.liveAt(Index))
2105      ++NumConflicts;
2106  }
2107  return NumConflicts;
2108}
2109
2110/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2111/// around all defs and uses of the specified interval.
2112void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2113                                            unsigned PhysReg, VirtRegMap &vrm) {
2114  unsigned SpillReg = getRepresentativeReg(PhysReg);
2115
2116  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register that aliases PhysReg but is not a sub-register
    // of the chosen representative super register, we can't handle it yet,
    // so assert.
2120    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2121           tri_->isSuperRegister(*AS, SpillReg));
2122
2123  LiveInterval &pli = getInterval(SpillReg);
2124  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2125  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2126         E = mri_->reg_end(); I != E; ++I) {
2127    MachineOperand &O = I.getOperand();
2128    MachineInstr *MI = O.getParent();
2129    if (SeenMIs.count(MI))
2130      continue;
2131    SeenMIs.insert(MI);
2132    unsigned Index = getInstructionIndex(MI);
2133    if (pli.liveAt(Index)) {
2134      vrm.addEmergencySpill(SpillReg, MI);
2135      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2136      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2137        if (!hasInterval(*AS))
2138          continue;
2139        LiveInterval &spli = getInterval(*AS);
2140        if (spli.liveAt(Index))
2141          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2142      }
2143    }
2144  }
2145}
2146
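/// addLiveRangeToEndOfBlock - Add a live range covering the given register
/// from the specified instruction to the end of its basic block, creating a
/// new value number marked as having a phi-kill at the block boundary.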
2147LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2148                                                   MachineInstr* startInst) {
2149  LiveInterval& Interval = getOrCreateInterval(reg);
2150  VNInfo* VN = Interval.getNextValue(
2151            getInstructionIndex(startInst) + InstrSlots::DEF,
2152            startInst, getVNInfoAllocator());
2153  VN->hasPHIKill = true;
2154  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2155  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2156               getMBBEndIdx(startInst->getParent()) + 1, VN);
2157  Interval.addRange(LR);
2158
2159  return LR;
2160}
2161