// LiveIntervalAnalysis.cpp revision 68abc43d979f4218553846cd099f5a64eb5e62f2
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/Passes.h"
31#include "llvm/CodeGen/ProcessImplicitDefs.h"
32#include "llvm/Target/TargetRegisterInfo.h"
33#include "llvm/Target/TargetInstrInfo.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include "llvm/ADT/DepthFirstIterator.h"
41#include "llvm/ADT/SmallSet.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/ADT/STLExtras.h"
44#include <algorithm>
45#include <limits>
46#include <cmath>
47using namespace llvm;
48
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Pass statistics, printed with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Pass identification and registration with the PassManager.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
59
/// getAnalysisUsage - Declare the analyses this pass requires and the ones
/// it preserves across its run.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // This pass only annotates liveness; the CFG itself is untouched.
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  // When StrongPHIElim is enabled it does its own PHI lowering; otherwise
  // ordinary PHI elimination must have run before us.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: passes that use LiveIntervals also need SlotIndexes alive.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
81
/// releaseMemory - Free all interval data computed by a previous run of
/// this pass.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  // Hand cloned machine instructions back to the MachineFunction for
  // deallocation.
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
98
/// runOnMachineFunction - Compute live intervals for all virtual and
/// physical registers in the function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function plus the analyses and target hooks used throughout.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  // Liveness information was attached to the function.
  return true;
}
119
120/// print - Implement the dump method.
121void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
122  OS << "********** INTERVALS **********\n";
123  for (const_iterator I = begin(), E = end(); I != E; ++I) {
124    I->second->print(OS, tri_);
125    OS << "\n";
126  }
127
128  printInstrs(OS);
129}
130
131void LiveIntervals::printInstrs(raw_ostream &OS) const {
132  OS << "********** MACHINEINSTRS **********\n";
133
134  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
135       mbbi != mbbe; ++mbbi) {
136    OS << "BB#" << mbbi->getNumber()
137       << ":\t\t# derived from " << mbbi->getName() << "\n";
138    for (MachineBasicBlock::iterator mii = mbbi->begin(),
139           mie = mbbi->end(); mii != mie; ++mii) {
140      if (mii->isDebugValue())
141        OS << "    \t" << *mii;
142      else
143        OS << getInstructionIndex(mii) << '\t' << *mii;
144    }
145  }
146}
147
/// dumpInstrs - Print all machine instructions to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
151
/// conflictsWithPhysReg - Return true if some instruction covered by li's
/// (single) live range references a register that overlaps physical
/// register reg, taking vrm's virtual->physical assignments into account.
/// Conservatively returns true for multi-range intervals and for ranges
/// that cross block or terminator boundaries.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  // The whole range maps to deleted instructions: nothing can conflict.
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Walk every instruction covered by the range, inclusive of lastMI.
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      // Translate allocated virtual registers to their physical assignment;
      // unassigned virtuals cannot conflict yet.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
217
/// conflictsWithAliasRef - Return true if any instruction covered by li
/// (excluding copies already scheduled for coalescing in JoinedCopies)
/// references a physical register that overlaps Reg but is not Reg itself.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Visit every instruction slot covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      // Copies that will be coalesced away cannot conflict.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        // Only physical registers other than Reg itself are of interest.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
248
249#ifndef NDEBUG
250static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
251  if (TargetRegisterInfo::isPhysicalRegister(reg))
252    dbgs() << tri_->getName(reg);
253  else
254    dbgs() << "%reg" << reg;
255}
256#endif
257
258static
259bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
260  unsigned Reg = MI.getOperand(MOIdx).getReg();
261  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
262    const MachineOperand &MO = MI.getOperand(i);
263    if (!MO.isReg())
264      continue;
265    if (MO.getReg() == Reg && MO.isDef()) {
266      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
267             MI.getOperand(MOIdx).getSubReg() &&
268             (MO.getSubReg() || MO.isImplicit()));
269      return true;
270    }
271  }
272  return false;
273}
274
275/// isPartialRedef - Return true if the specified def at the specific index is
276/// partially re-defining the specified live interval. A common case of this is
277/// a definition of the sub-register.
278bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
279                                   LiveInterval &interval) {
280  if (!MO.getSubReg() || MO.isEarlyClobber())
281    return false;
282
283  SlotIndex RedefIndex = MIIdx.getDefIndex();
284  const LiveRange *OldLR =
285    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
286  if (OldLR->valno->isDefAccurate()) {
287    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
288    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
289  }
290  return false;
291}
292
/// handleVirtualRegisterDef - Update interval for the virtual register
/// defined by operand MOIdx (MO) of instruction mi at index MIIdx in block
/// mbb. The first def of a vreg builds its whole interval from the
/// LiveVariables kill information; subsequent defs (phi elimination,
/// two-address rewriting, partial sub-register redefs) split or extend the
/// existing interval.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    // Remember whether this def is a copy; the coalescer consults this.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isCopyLike() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // Defined and killed by the same instruction: the value only
        // occupies the def slot.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80	%reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120	%reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef && (mi->isCopyLike() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isCopyLike() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
502
/// handlePhysicalRegisterDef - Extend interval for the physical register
/// defined by mi at MIIdx. Physical registers never live across basic block
/// boundaries, so the search for the end of the range is confined to MBB.
/// CopyMI, if non-null, is the copy instruction producing this def.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // DBG_VALUE instructions have no slot index; skip them entirely.
    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  // Extending an existing range because an earlyclobber redefined the reg.
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
590
/// handleRegisterDef - Dispatch a register def to the virtual or physical
/// register handler. A def of an allocatable physical register also defines
/// each of its sub-registers.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Remember whether the def is a copy; the value number records it.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isCopyLike() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
616
/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB, starting at the block entry index MIIdx. isAlias is true
/// when the register is only live-in as a sub-register of an explicit
/// live-in; in that case an unused register is treated as dead rather than
/// live-through.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  // Scan forward for the first kill or redefinition.
  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The live-in value is modeled as a phantom PHI def at the block start.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
690
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers seen only in <undef> uses; they get empty intervals below.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUE instructions have no slot index and define nothing.
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
759
760LiveInterval* LiveIntervals::createInterval(unsigned reg) {
761  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
762  return new LiveInterval(reg, Weight);
763}
764
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  // Copy allocates the clone's VNInfos from this pass's allocator, so the
  // clone's value numbers stay valid for the lifetime of the pass.
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
772
773//===----------------------------------------------------------------------===//
774// Register allocator hooks.
775//
776
777/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
778/// allow one) virtual register operand, then its uses are implicitly using
779/// the register. Returns the virtual register.
780unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
781                                            MachineInstr *MI) const {
782  unsigned RegOp = 0;
783  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
784    MachineOperand &MO = MI->getOperand(i);
785    if (!MO.isReg() || !MO.isUse())
786      continue;
787    unsigned Reg = MO.getReg();
788    if (Reg == 0 || Reg == li.reg)
789      continue;
790
791    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
792        !allocatableRegs_[Reg])
793      continue;
794    // FIXME: For now, only remat MI with at most one register operand.
795    assert(!RegOp &&
796           "Can't rematerialize instruction with multiple register operand!");
797    RegOp = MO.getReg();
798#ifndef NDEBUG
799    break;
800#endif
801  }
802  return RegOp;
803}
804
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  // NOTE(review): assumes li is live at MI's index — FindLiveRangeContaining
  // returns null otherwise, which would crash here; confirm callers only pass
  // instructions inside li's live range.
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
814
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable. SpillIs lists
/// intervals that are about to be spilled; remat is rejected if MI's
/// implicit register operand is among them. Note: the isLoad out-parameter
/// is never written by this overload.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  // Ask the target whether MI is trivially rematerializable.
  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every non-debug use of li reached by this val# must also be reached by
    // the implicitly-used register's value; otherwise rematerializing at
    // that use would read a different value of ImpUse.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      // NOTE(review): the iterator returned by FindLiveRangeContaining is
      // dereferenced without an end() check — assumes every non-debug use
      // is covered by the interval.
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
852
853/// isReMaterializable - Returns true if the definition MI of the specified
854/// val# of the specified interval is re-materializable.
855bool LiveIntervals::isReMaterializable(const LiveInterval &li,
856                                       const VNInfo *ValNo, MachineInstr *MI) {
857  SmallVector<LiveInterval*, 4> Dummy1;
858  bool Dummy2;
859  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
860}
861
862/// isReMaterializable - Returns true if every definition of MI of every
863/// val# of the specified interval is re-materializable.
864bool LiveIntervals::isReMaterializable(const LiveInterval &li,
865                                       SmallVectorImpl<LiveInterval*> &SpillIs,
866                                       bool &isLoad) {
867  isLoad = false;
868  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
869       i != e; ++i) {
870    const VNInfo *VNI = *i;
871    if (VNI->isUnused())
872      continue; // Dead val#.
873    // Is the def for the val# rematerializable?
874    if (!VNI->isDefAccurate())
875      return false;
876    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
877    bool DefIsLoad = false;
878    if (!ReMatDefMI ||
879        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
880      return false;
881    isLoad |= DefIsLoad;
882  }
883  return true;
884}
885
886/// FilterFoldedOps - Filter out two-address use operands. Return
887/// true if it finds any issue with the operands that ought to prevent
888/// folding.
889static bool FilterFoldedOps(MachineInstr *MI,
890                            SmallVector<unsigned, 2> &Ops,
891                            unsigned &MRInfo,
892                            SmallVector<unsigned, 2> &FoldOps) {
893  MRInfo = 0;
894  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
895    unsigned OpIdx = Ops[i];
896    MachineOperand &MO = MI->getOperand(OpIdx);
897    // FIXME: fold subreg use.
898    if (MO.getSubReg())
899      return true;
900    if (MO.isDef())
901      MRInfo |= (unsigned)VirtRegMap::isMod;
902    else {
903      // Filter out two-address use operand(s).
904      if (MI->isRegTiedToDefOperand(OpIdx)) {
905        MRInfo = VirtRegMap::isModRef;
906        continue;
907      }
908      MRInfo |= (unsigned)VirtRegMap::isRef;
909    }
910    FoldOps.push_back(OpIdx);
911  }
912  return false;
913}
914
915
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
///
/// MI     - instruction to fold into; replaced with the folded instruction
///          on success.
/// DefMI  - rematerialized definition to fold (null when folding from a
///          stack slot).
/// Ops    - operand indices of MI referring to the spilled register.
/// isSS   - true when folding a stack slot access, false for a remat load.
/// Slot   - stack slot to fold (meaningful when isSS is true).
/// Reg    - the virtual register being spilled.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to perform the fold: either from the stack slot or from
  // the rematerialized definition.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer spill/restore bookkeeping from the old instruction to the
    // folded one, then splice the new instruction in place of the old.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
967
968/// canFoldMemoryOperand - Returns true if the specified load / store
969/// folding is possible.
970bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
971                                         SmallVector<unsigned, 2> &Ops,
972                                         bool ReMat) const {
973  // Filter the list of operand indexes that are to be folded. Abort if
974  // any operand will prevent folding.
975  unsigned MRInfo = 0;
976  SmallVector<unsigned, 2> FoldOps;
977  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
978    return false;
979
980  // It's only legal to remat for a use, not a def.
981  if (ReMat && (MRInfo & VirtRegMap::isMod))
982    return false;
983
984  return tii_->canFoldMemoryOperand(MI, FoldOps);
985}
986
987bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
988  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
989
990  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
991
992  if (mbb == 0)
993    return false;
994
995  for (++itr; itr != li.ranges.end(); ++itr) {
996    MachineBasicBlock *mbb2 =
997      indexes_->getMBBCoveringRange(itr->start, itr->end);
998
999    if (mbb2 != mbb)
1000      return false;
1001  }
1002
1003  return true;
1004}
1005
1006/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1007/// interval on to-be re-materialized operands of MI) with new register.
1008void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1009                                       MachineInstr *MI, unsigned NewVReg,
1010                                       VirtRegMap &vrm) {
1011  // There is an implicit use. That means one of the other operand is
1012  // being remat'ed and the remat'ed instruction has li.reg as an
1013  // use operand. Make sure we rewrite that as well.
1014  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1015    MachineOperand &MO = MI->getOperand(i);
1016    if (!MO.isReg())
1017      continue;
1018    unsigned Reg = MO.getReg();
1019    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1020      continue;
1021    if (!vrm.isReMaterialized(Reg))
1022      continue;
1023    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1024    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1025    if (UseMO)
1026      UseMO->setReg(NewVReg);
1027  }
1028}
1029
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites every operand of MI that refers to li.reg to a new virtual
/// register (allocating one via NewVReg if it is still 0), attempts to fold
/// the spill/restore/remat into MI, and extends or creates the new spill
/// interval accordingly. HasDef/HasUse report whether a def and/or use was
/// rewritten; the return value reports whether the access could be folded.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  // Find the (first remaining) operand referring to the spilled register.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If folding erased MI entirely, we are done with it; otherwise
          // restart the operand scan on the replacement instruction.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover [load, def) around this instruction.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // Cover [def, store) for the rewritten definition.
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
1226bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1227                                   const VNInfo *VNI,
1228                                   MachineBasicBlock *MBB,
1229                                   SlotIndex Idx) const {
1230  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1231}
1232
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    // Slot index of the instruction to be rewritten.
    SlotIndex Index;
    // The instruction itself.
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  // Orders RewriteInfo records by ascending slot index, so rewrites are
  // processed in instruction order.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1248
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range I, delegating per-instruction work to
/// rewriteInstructionForSpills. Records, per basic block, where spills
/// (SpillMBBs/SpillIdxes) and restores (RestoreMBBs/RestoreIdxes) are
/// needed, and updates the spill weight of each new interval.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole-instruction boundaries.
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before any possible erase below invalidates the iterator.
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      // No frame-index form available; drop the debug info entirely.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only rewrite instructions inside this live range.
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // Implicit virtual register use of the remat definition, if any.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1479
1480bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1481                        unsigned vr, BitVector &RestoreMBBs,
1482                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1483  if (!RestoreMBBs[Id])
1484    return false;
1485  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1486  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1487    if (Restores[i].index == index &&
1488        Restores[i].vreg == vr &&
1489        Restores[i].canFold)
1490      return true;
1491  return false;
1492}
1493
1494void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1495                        unsigned vr, BitVector &RestoreMBBs,
1496                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1497  if (!RestoreMBBs[Id])
1498    return;
1499  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1500  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1501    if (Restores[i].index == index && Restores[i].vreg)
1502      Restores[i].index = SlotIndex();
1503}
1504
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk every remaining reference to li.reg. The iterator is advanced
  // before the current instruction may be erased or its operand rewritten.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      // Any def of li.reg still present at this point must be the
      // IMPLICIT_DEF itself; delete it.
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that still refers to li.reg, marking it
      // undef since the value comes from an implicit def.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1546
1547float
1548LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1549  // Limit the loop depth ridiculousness.
1550  if (loopDepth > 200)
1551    loopDepth = 200;
1552
1553  // The loop depth is used to roughly estimate the number of times the
1554  // instruction is executed. Something like 10^d is simple, but will quickly
1555  // overflow a float. This expression behaves like 10^d for small d, but is
1556  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1557  // headroom before overflow.
1558  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1559
1560  return (isDef + isUse) * lc;
1561}
1562
1563void
1564LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1565  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1566    normalizeSpillWeight(*NewLIs[i]);
1567}
1568
/// addIntervalsForSpills - Spill the live interval 'li'. Every def and use of
/// li.reg is rewritten onto fresh virtual registers; spill and restore points
/// (or folded memory operands / rematerializations) are recorded in 'vrm'.
/// Returns the list of new, non-empty intervals for the allocator to process.
/// 'SpillIs' lists intervals being spilled concurrently (their defs must not
/// be used for rematerialization); 'loopInfo' supplies loop depths used for
/// spill-weight computation.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization state, indexed by VNInfo::id:
  // the (cloned) remat def, the original def, an assigned remat id, and
  // whether the original def may be deleted outright.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Splitting is only attempted when the interval spans multiple blocks.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // Analyze each value number: decide whether its defining instruction can
  // be rematerialized (cloned) instead of reloaded from a stack slot.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    // Not splitting: the rewrite above already handled everything.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // For every block that needs a spill, either fold the store into the
    // defining instruction or tell the spiller to insert an explicit store.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          // Collect all operands of MI that reference VReg; folding is only
          // legal if any use can be folded along with the def.
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // For every block that needs a restore, either fold the load (or the
  // rematerialized def) into the using instruction or record a restore point.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // Entry was invalidated (its load was folded away above); skip it.
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the accumulated weight by the interval's approximate size.
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // Mark the last use as a kill, unless it is tied to a def
        // (two-address), in which case the register stays live through it.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
1885
1886/// hasAllocatableSuperReg - Return true if the specified physical register has
1887/// any super register that's allocatable.
1888bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1889  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1890    if (allocatableRegs_[*AS] && hasInterval(*AS))
1891      return true;
1892  return false;
1893}
1894
1895/// getRepresentativeReg - Find the largest super register of the specified
1896/// physical register.
1897unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1898  // Find the largest super-register that is allocatable.
1899  unsigned BestReg = Reg;
1900  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1901    unsigned SuperReg = *AS;
1902    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1903      BestReg = SuperReg;
1904      break;
1905    }
1906  }
1907  return BestReg;
1908}
1909
1910/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1911/// specified interval that conflicts with the specified physical register.
1912unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1913                                                   unsigned PhysReg) const {
1914  unsigned NumConflicts = 0;
1915  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1916  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1917         E = mri_->reg_end(); I != E; ++I) {
1918    MachineOperand &O = I.getOperand();
1919    MachineInstr *MI = O.getParent();
1920    if (MI->isDebugValue())
1921      continue;
1922    SlotIndex Index = getInstructionIndex(MI);
1923    if (pli.liveAt(Index))
1924      ++NumConflicts;
1925  }
1926  return NumConflicts;
1927}
1928
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative (largest) super-register so all aliased
  // registers are handled together.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // Collect the physreg intervals to spill: SpillReg itself when it has an
  // interval, otherwise its sub-registers that do (skipping sub-registers
  // already covered by a larger one via the Added set).
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // Visit each instruction that defs or uses li.reg exactly once and insert
  // an emergency spill of every conflicting physreg interval around it.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    // Skip debug instructions and instructions already processed.
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        // Carve a hole in the physreg interval around this instruction.
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The conflict spans multiple live ranges; we cannot free the
        // register here, so register allocation has genuinely failed.
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      // Keep sub-register intervals consistent with the hole carved in the
      // super-register interval above.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
2000
2001LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2002                                                  MachineInstr* startInst) {
2003  LiveInterval& Interval = getOrCreateInterval(reg);
2004  VNInfo* VN = Interval.getNextValue(
2005    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2006    startInst, true, getVNInfoAllocator());
2007  VN->setHasPHIKill(true);
2008  LiveRange LR(
2009     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2010     getMBBEndIdx(startInst->getParent()), VN);
2011  Interval.addRange(LR);
2012
2013  return LR;
2014}
2015
2016