LiveIntervalAnalysis.cpp revision e9157e10cd2be2b3dbaf26baa925765562819266
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
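
// A minimal usage sketch (hypothetical client pass, not part of this file):
// a register allocator would request this analysis through the PassManager
// and then walk the computed intervals. 'MyRegAlloc' is made up for
// illustration; getAnalysis<> and the iterators are the real APIs.
//
//   void MyRegAlloc::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();  // ensure intervals are computed
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
//   bool MyRegAlloc::runOnMachineFunction(MachineFunction &MF) {
//     LiveIntervals &LIS = getAnalysis<LiveIntervals>();
//     for (LiveIntervals::iterator I = LIS.begin(), E = LIS.end(); I != E; ++I)
//       I->second->print(dbgs(), 0);  // each entry maps reg -> LiveInterval*
//     return false;
//   }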

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

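/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// to the given interval may clash with another use or def of 'reg' inside
/// the interval's single live range; conservatively answers true whenever a
/// conflict cannot be ruled out.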
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

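/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges (ignoring the joined copies) has a physical register operand
/// that overlaps 'Reg'.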
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

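/// MultipleDefsBySameMI - Return true if the instruction defines the same
/// virtual register again in a later operand, as happens with multiple
/// sub-register defs by one instruction (see the VLD1q16 example in
/// handleVirtualRegisterDef below).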
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index
/// is partially re-defining the specified live interval. A common case of
/// this is a definition of a sub-register.
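/// For instance, in the notation used elsewhere in this file,
///   %reg1041:5<def> = ...
/// writes only one sub-register of %reg1041, so the value that was live in
/// the remaining sub-registers is still needed.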
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
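    // For instance (hypothetical indexes): a vreg defined at index 12 and
    // killed at index 20 in this same block yields the single range [12,20).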
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a new value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
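  // For instance (hypothetical index): a dead def at index 8 gets only the
  // unit range [defSlot(8), defSlot(8)+1), covering the defining
  // instruction itself.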
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

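/// handleRegisterDef - Dispatch a def operand to the virtual or physical
/// register handler; a def of an allocatable physical register also defines
/// each of its sub-registers.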
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

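/// handleLiveInRegister - Build the part of the live interval for a register
/// that is live into MBB. Since no instruction in this block defines the
/// value, it is given a PHI-def value number anchored at the block start.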
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the value reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and the
/// variable is live throughout it.
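///
/// For illustration (hypothetical numbering, not from a real dump): if
/// %reg1024 is defined by the instruction at index 4 and last read at index
/// 16, with no other defs or uses, it receives the single interval [4,16)
/// with one value number defined at 4.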
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
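/// (A typical case is a PIC base register: an instruction that is
/// rematerialized away from its original position still implicitly reads
/// the PIC base vreg, as noted in isReMaterializable below.)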
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold a spill / restore from or to a
/// stack slot, or a rematerialized load, into the given operands of the
/// specified MI. If successful, MI is updated with the newly created
/// instruction and true is returned.
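/// For instance (X86, hypothetical vregs): a reload of %reg1024 from a stack
/// slot that only feeds "ADD32rr %reg1025, %reg1024" can be folded into
/// "ADD32rm %reg1025, <fi#slot>", eliminating the separate load.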
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
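
/// anyKillInMBBAfterIdx - Return true if the live interval is killed in MBB
/// strictly after the given index.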
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1311
1312  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1313  // Now rewrite the defs and uses.
1314  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1315    RewriteInfo &rwi = RewriteMIs[i];
1316    ++i;
1317    SlotIndex index = rwi.Index;
1318    MachineInstr *MI = rwi.MI;
1319    // If MI def and/or use the same register multiple times, then there
1320    // are multiple entries.
1321    while (i != e && RewriteMIs[i].MI == MI) {
1322      assert(RewriteMIs[i].Index == index);
1323      ++i;
1324    }
1325    MachineBasicBlock *MBB = MI->getParent();
1326
1327    if (ImpUse && MI != ReMatDefMI) {
1328      // Re-matting an instruction with virtual register use. Prevent interval
1329      // from being spilled.
1330      getInterval(ImpUse).markNotSpillable();
1331    }
1332
1333    unsigned MBBId = MBB->getNumber();
1334    unsigned ThisVReg = 0;
1335    if (TrySplit) {
1336      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1337      if (NVI != MBBVRegsMap.end()) {
1338        ThisVReg = NVI->second;
1339        // One common case:
1340        // x = use
1341        // ...
1342        // ...
1343        // def = ...
1344        //     = use
1345        // It's better to start a new interval to avoid artifically
1346        // extend the new interval.
1347        if (MI->readsWritesVirtualRegister(li.reg) ==
1348            std::make_pair(false,true)) {
1349          MBBVRegsMap.erase(MBB->getNumber());
1350          ThisVReg = 0;
1351        }
1352      }
1353    }
1354
1355    bool IsNew = ThisVReg == 0;
1356    if (IsNew) {
1357      // This ends the previous live interval. If all of its def / use
1358      // can be folded, give it a low spill weight.
1359      if (NewVReg && TrySplit && AllCanFold) {
1360        LiveInterval &nI = getOrCreateInterval(NewVReg);
1361        nI.weight /= 10.0F;
1362      }
1363      AllCanFold = true;
1364    }
1365    NewVReg = ThisVReg;
1366
1367    bool HasDef = false;
1368    bool HasUse = false;
1369    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1370                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1371                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1372                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1373                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1374    if (!HasDef && !HasUse)
1375      continue;
1376
1377    AllCanFold &= CanFold;
1378
1379    // Update weight of spill interval.
1380    LiveInterval &nI = getOrCreateInterval(NewVReg);
1381    if (!TrySplit) {
1382      // The spill weight is now infinity as it cannot be spilled again.
1383      nI.markNotSpillable();
1384      continue;
1385    }
1386
1387    // Keep track of the last def and first use in each MBB.
1388    if (HasDef) {
1389      if (MI != ReMatOrigDefMI || !CanDelete) {
1390        bool HasKill = false;
1391        if (!HasUse)
1392          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1393        else {
1394          // If this is a two-address code, then this index starts a new VNInfo.
1395          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1396          if (VNI)
1397            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1398        }
1399        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1400          SpillIdxes.find(MBBId);
1401        if (!HasKill) {
1402          if (SII == SpillIdxes.end()) {
1403            std::vector<SRInfo> S;
1404            S.push_back(SRInfo(index, NewVReg, true));
1405            SpillIdxes.insert(std::make_pair(MBBId, S));
1406          } else if (SII->second.back().vreg != NewVReg) {
1407            SII->second.push_back(SRInfo(index, NewVReg, true));
1408          } else if (index > SII->second.back().index) {
1409            // If there is an earlier def and this is a two-address
1410            // instruction, then it's not possible to fold the store (which
1411            // would also fold the load).
1412            SRInfo &Info = SII->second.back();
1413            Info.index = index;
1414            Info.canFold = !HasUse;
1415          }
1416          SpillMBBs.set(MBBId);
1417        } else if (SII != SpillIdxes.end() &&
1418                   SII->second.back().vreg == NewVReg &&
1419                   index > SII->second.back().index) {
1420          // There is an earlier def that's not killed (must be two-address).
1421          // The spill is no longer needed.
1422          SII->second.pop_back();
1423          if (SII->second.empty()) {
1424            SpillIdxes.erase(MBBId);
1425            SpillMBBs.reset(MBBId);
1426          }
1427        }
1428      }
1429    }
1430
1431    if (HasUse) {
1432      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1433        SpillIdxes.find(MBBId);
1434      if (SII != SpillIdxes.end() &&
1435          SII->second.back().vreg == NewVReg &&
1436          index > SII->second.back().index)
1437        // Use(s) follow the last def; it's not safe to fold the spill.
1438        SII->second.back().canFold = false;
1439      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1440        RestoreIdxes.find(MBBId);
1441      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1442        // If we are splitting live intervals, only fold if it's the first
1443        // use and there isn't another use later in the MBB.
1444        RII->second.back().canFold = false;
1445      else if (IsNew) {
1446        // Only need a reload if there isn't an earlier def / use.
1447        if (RII == RestoreIdxes.end()) {
1448          std::vector<SRInfo> Infos;
1449          Infos.push_back(SRInfo(index, NewVReg, true));
1450          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1451        } else {
1452          RII->second.push_back(SRInfo(index, NewVReg, true));
1453        }
1454        RestoreMBBs.set(MBBId);
1455      }
1456    }
1457
1458    // Update spill weight.
1459    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1460    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1461  }
1462
1463  if (NewVReg && TrySplit && AllCanFold) {
1464    // If all of its defs / uses can be folded, give it a low spill weight.
1465    LiveInterval &nI = getOrCreateInterval(NewVReg);
1466    nI.weight /= 10.0F;
1467  }
1468}
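// Editor's sketch (an aside, not part of the pass): the per-MBB bookkeeping
// above keeps at most one "pending" entry per block at the back of
// SpillIdxes / RestoreIdxes, each entry being an SRInfo as used here:
//
//   struct SRInfo {
//     SlotIndex index;   // position of the def (spill) or use (restore)
//     unsigned  vreg;    // register of the new, split interval
//     bool      canFold; // whether the store / load can be folded into MI
//   };
//
// SpillIdxes[MBBId].back() tracks the last def in the block that isn't
// killed afterwards; RestoreIdxes[MBBId].back() tracks the first use that
// still needs a reload.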
1469
1470bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1471                        unsigned vr, BitVector &RestoreMBBs,
1472                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1473  if (!RestoreMBBs[Id])
1474    return false;
1475  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1476  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1477    if (Restores[i].index == index &&
1478        Restores[i].vreg == vr &&
1479        Restores[i].canFold)
1480      return true;
1481  return false;
1482}
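// Editor's note: this predicate is consulted by the spill-folding loop in
// addIntervalsForSpills below; a use operand only permits folding the store
// when a still-foldable restore exists at the same index and vreg, roughly:
//
//   if (isReMat ||
//       (!FoundUse && !alsoFoldARestore(Id, index, VReg,
//                                       RestoreMBBs, RestoreIdxes))) {
//     CanFold = false;  // a two-address use we can't fold through
//     break;
//   }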
1483
1484void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1485                        unsigned vr, BitVector &RestoreMBBs,
1486                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1487  if (!RestoreMBBs[Id])
1488    return;
1489  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1490  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1491    if (Restores[i].index == index && Restores[i].vreg == vr)
1492      Restores[i].index = SlotIndex();
1493}
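// Editor's note: a default-constructed SlotIndex() serves as the tombstone
// value here; the restore-emission loop below skips such entries:
//
//   if (restores[i].index == SlotIndex())
//     continue;  // the restore was folded away, nothing to emit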
1494
1495/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1496/// spilled and create empty intervals for their uses.
1497void
1498LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1499                                    const TargetRegisterClass* rc,
1500                                    std::vector<LiveInterval*> &NewLIs) {
1501  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1502         re = mri_->reg_end(); ri != re; ) {
1503    MachineOperand &O = ri.getOperand();
1504    MachineInstr *MI = &*ri;
1505    ++ri;
1506    if (MI->isDebugValue()) {
1507      // Remove debug info for now.
1508      O.setReg(0U);
1509      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1510      continue;
1511    }
1512    if (O.isDef()) {
1513      assert(MI->isImplicitDef() &&
1514             "Register def was not rewritten?");
1515      RemoveMachineInstrFromMaps(MI);
1516      vrm.RemoveMachineInstrFromMaps(MI);
1517      MI->eraseFromParent();
1518    } else {
1519      // This must be a use of an implicit_def so it's not part of the live
1520      // interval. Create a new empty live interval for it.
1521      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1522      unsigned NewVReg = mri_->createVirtualRegister(rc);
1523      vrm.grow();
1524      vrm.setIsImplicitlyDefined(NewVReg);
1525      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1526      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1527        MachineOperand &MO = MI->getOperand(i);
1528        if (MO.isReg() && MO.getReg() == li.reg) {
1529          MO.setReg(NewVReg);
1530          MO.setIsUndef();
1531        }
1532      }
1533    }
1534  }
1535}
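// Illustrative before / after (editor's sketch, hypothetical vregs):
//
//   %reg1024<def> = IMPLICIT_DEF        ; def: erased from the function
//   ...
//   %reg1025 = op %reg1024              ; use: rewritten to
//   %reg1025 = op %reg1026<undef>       ; fresh vreg with an empty interval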
1536
1537float
1538LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1539  // Limit the loop depth ridiculousness.
1540  if (loopDepth > 200)
1541    loopDepth = 200;
1542
1543  // The loop depth is used to roughly estimate the number of times the
1544  // instruction is executed. Something like 10^d is simple, but will quickly
1545  // overflow a float. This expression behaves like 10^d for small d, but is
1546  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1547  // headroom before overflow.
1548  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1549
1550  return (isDef + isUse) * lc;
1551}
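// Worked example (editor's note): with lc == (1 + 100/(d+10))^d,
//   d == 0   ->  1.0        (so a def+use instruction weighs 2.0)
//   d == 1   -> ~10.1
//   d == 2   -> ~87.1
//   d == 200 -> ~6.7e33     (float max is ~3.4e38, so no overflow)
// i.e. roughly 10^d for shallow loops, tapering off for deep nests.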
1552
1553void
1554LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1555  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1556    normalizeSpillWeight(*NewLIs[i]);
1557}
1558
1559std::vector<LiveInterval*> LiveIntervals::
1560addIntervalsForSpills(const LiveInterval &li,
1561                      const SmallVectorImpl<LiveInterval*> &SpillIs,
1562                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1563  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1564
1565  DEBUG({
1566      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1567      li.print(dbgs(), tri_);
1568      dbgs() << '\n';
1569    });
1570
1571  // Each bit specifies whether a spill is required in the MBB.
1572  BitVector SpillMBBs(mf_->getNumBlockIDs());
1573  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1574  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1575  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1576  DenseMap<unsigned,unsigned> MBBVRegsMap;
1577  std::vector<LiveInterval*> NewLIs;
1578  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1579
1580  unsigned NumValNums = li.getNumValNums();
1581  SmallVector<MachineInstr*, 4> ReMatDefs;
1582  ReMatDefs.resize(NumValNums, NULL);
1583  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1584  ReMatOrigDefs.resize(NumValNums, NULL);
1585  SmallVector<int, 4> ReMatIds;
1586  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1587  BitVector ReMatDelete(NumValNums);
1588  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1589
1590  // Spilling a split live interval. It cannot be split any further. Also,
1591  // it's guaranteed to be a single val# / range interval.
1592  if (vrm.getPreSplitReg(li.reg)) {
1593    vrm.setIsSplitFromReg(li.reg, 0);
1594    // Unset the split kill marker on the last use.
1595    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1596    if (KillIdx != SlotIndex()) {
1597      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1598      assert(KillMI && "Last use disappeared?");
1599      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1600      assert(KillOp != -1 && "Last use disappeared?");
1601      KillMI->getOperand(KillOp).setIsKill(false);
1602    }
1603    vrm.removeKillPoint(li.reg);
1604    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1605    Slot = vrm.getStackSlot(li.reg);
1606    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1607    MachineInstr *ReMatDefMI = DefIsReMat ?
1608      vrm.getReMaterializedMI(li.reg) : NULL;
1609    int LdSlot = 0;
1610    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1611    bool isLoad = isLoadSS ||
1612      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1613    bool IsFirstRange = true;
1614    for (LiveInterval::Ranges::const_iterator
1615           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1616      // If this is a split live interval with multiple ranges, it means there
1617      // are two-address instructions that re-defined the value. Only the
1618      // first def can be rematerialized!
1619      if (IsFirstRange) {
1620        // Note ReMatOrigDefMI has already been deleted.
1621        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1622                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1623                             false, vrm, rc, ReMatIds, loopInfo,
1624                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1625                             MBBVRegsMap, NewLIs);
1626      } else {
1627        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1628                             Slot, 0, false, false, false,
1629                             false, vrm, rc, ReMatIds, loopInfo,
1630                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1631                             MBBVRegsMap, NewLIs);
1632      }
1633      IsFirstRange = false;
1634    }
1635
1636    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1637    normalizeSpillWeights(NewLIs);
1638    return NewLIs;
1639  }
1640
1641  bool TrySplit = !intervalIsInOneMBB(li);
1642  if (TrySplit)
1643    ++numSplits;
1644  bool NeedStackSlot = false;
1645  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1646       i != e; ++i) {
1647    const VNInfo *VNI = *i;
1648    unsigned VN = VNI->id;
1649    if (VNI->isUnused())
1650      continue; // Dead val#.
1651    // Is the def for the val# rematerializable?
1652    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1653    bool dummy;
1654    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1655      // Remember how to remat the def of this val#.
1656      ReMatOrigDefs[VN] = ReMatDefMI;
1657      // Original def may be modified so we have to make a copy here.
1658      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1659      CloneMIs.push_back(Clone);
1660      ReMatDefs[VN] = Clone;
1661
1662      bool CanDelete = true;
1663      if (VNI->hasPHIKill()) {
1664        // A kill is a phi node; not all of its uses can be rematerialized,
1665        // so the def must not be deleted.
1666        CanDelete = false;
1667        // Need a stack slot if there is any live range where uses cannot be
1668        // rematerialized.
1669        NeedStackSlot = true;
1670      }
1671      if (CanDelete)
1672        ReMatDelete.set(VN);
1673    } else {
1674      // Need a stack slot if there is any live range where uses cannot be
1675      // rematerialized.
1676      NeedStackSlot = true;
1677    }
1678  }
1679
1680  // One stack slot per live interval.
1681  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1682    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1683      Slot = vrm.assignVirt2StackSlot(li.reg);
1684
1685    // This case only occurs when the prealloc splitter has already assigned
1686    // a stack slot to this vreg.
1687    else
1688      Slot = vrm.getStackSlot(li.reg);
1689  }
1690
1691  // Create new intervals and rewrite defs and uses.
1692  for (LiveInterval::Ranges::const_iterator
1693         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1694    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1695    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1696    bool DefIsReMat = ReMatDefMI != NULL;
1697    bool CanDelete = ReMatDelete[I->valno->id];
1698    int LdSlot = 0;
1699    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1700    bool isLoad = isLoadSS ||
1701      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1702    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1703                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1704                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1705                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1706                               MBBVRegsMap, NewLIs);
1707  }
1708
1709  // Insert spills / restores if we are splitting.
1710  if (!TrySplit) {
1711    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1712    normalizeSpillWeights(NewLIs);
1713    return NewLIs;
1714  }
1715
1716  SmallPtrSet<LiveInterval*, 4> AddedKill;
1717  SmallVector<unsigned, 2> Ops;
1718  if (NeedStackSlot) {
1719    int Id = SpillMBBs.find_first();
1720    while (Id != -1) {
1721      std::vector<SRInfo> &spills = SpillIdxes[Id];
1722      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1723        SlotIndex index = spills[i].index;
1724        unsigned VReg = spills[i].vreg;
1725        LiveInterval &nI = getOrCreateInterval(VReg);
1726        bool isReMat = vrm.isReMaterialized(VReg);
1727        MachineInstr *MI = getInstructionFromIndex(index);
1728        bool CanFold = false;
1729        bool FoundUse = false;
1730        Ops.clear();
1731        if (spills[i].canFold) {
1732          CanFold = true;
1733          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1734            MachineOperand &MO = MI->getOperand(j);
1735            if (!MO.isReg() || MO.getReg() != VReg)
1736              continue;
1737
1738            Ops.push_back(j);
1739            if (MO.isDef())
1740              continue;
1741            if (isReMat ||
1742                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1743                                                RestoreMBBs, RestoreIdxes))) {
1744              // MI has two-address uses of the same register. If the use
1745              // isn't the first and only use in the BB, then we can't fold
1746              // it. FIXME: Move this to rewriteInstructionsForSpills.
1747              CanFold = false;
1748              break;
1749            }
1750            FoundUse = true;
1751          }
1752        }
1753        // Fold the store into the def if possible.
1754        bool Folded = false;
1755        if (CanFold && !Ops.empty()) {
1756          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1757            Folded = true;
1758            if (FoundUse) {
1759              // The use(s) were folded as well; do not issue a load.
1760              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1761              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1762            }
1763            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1764          }
1765        }
1766
1767        // Otherwise tell the spiller to issue a spill.
1768        if (!Folded) {
1769          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1770          bool isKill = LR->end == index.getStoreIndex();
1771          if (!MI->registerDefIsDead(nI.reg))
1772            // No need to spill a dead def.
1773            vrm.addSpillPoint(VReg, isKill, MI);
1774          if (isKill)
1775            AddedKill.insert(&nI);
1776        }
1777      }
1778      Id = SpillMBBs.find_next(Id);
1779    }
1780  }
1781
1782  int Id = RestoreMBBs.find_first();
1783  while (Id != -1) {
1784    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1785    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1786      SlotIndex index = restores[i].index;
1787      if (index == SlotIndex())
1788        continue;
1789      unsigned VReg = restores[i].vreg;
1790      LiveInterval &nI = getOrCreateInterval(VReg);
1791      bool isReMat = vrm.isReMaterialized(VReg);
1792      MachineInstr *MI = getInstructionFromIndex(index);
1793      bool CanFold = false;
1794      Ops.clear();
1795      if (restores[i].canFold) {
1796        CanFold = true;
1797        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1798          MachineOperand &MO = MI->getOperand(j);
1799          if (!MO.isReg() || MO.getReg() != VReg)
1800            continue;
1801
1802          if (MO.isDef()) {
1803            // If this restore were to be folded, it would have been folded
1804            // already.
1805            CanFold = false;
1806            break;
1807          }
1808          Ops.push_back(j);
1809        }
1810      }
1811
1812      // Fold the load into the use if possible.
1813      bool Folded = false;
1814      if (CanFold && !Ops.empty()) {
1815        if (!isReMat)
1816          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1817        else {
1818          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1819          int LdSlot = 0;
1820          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1821          // If the rematerializable def is a load, also try to fold it.
1822          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1823            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1824                                          Ops, isLoadSS, LdSlot, VReg);
1825          if (!Folded) {
1826            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1827            if (ImpUse) {
1828              // Re-materializing an instruction that uses a virtual register.
1829              // Add that register as an implicit use on the use MI and mark
1830              // its interval as unspillable.
1831              LiveInterval &ImpLi = getInterval(ImpUse);
1832              ImpLi.markNotSpillable();
1833              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1834            }
1835          }
1836        }
1837      }
1838      // If folding is not possible / failed, then tell the spiller to issue a
1839      // load / rematerialization for us.
1840      if (Folded)
1841        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1842      else
1843        vrm.addRestorePoint(VReg, MI);
1844    }
1845    Id = RestoreMBBs.find_next(Id);
1846  }
1847
1848  // Finalize intervals: add kills, finalize spill weights, and filter out
1849  // dead intervals.
1850  std::vector<LiveInterval*> RetNewLIs;
1851  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1852    LiveInterval *LI = NewLIs[i];
1853    if (!LI->empty()) {
1854      if (!AddedKill.count(LI)) {
1855        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1856        SlotIndex LastUseIdx = LR->end.getBaseIndex();
1857        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1858        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1859        assert(UseIdx != -1);
1860        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1861          LastUse->getOperand(UseIdx).setIsKill();
1862          vrm.addKillPoint(LI->reg, LastUseIdx);
1863        }
1864      }
1865      RetNewLIs.push_back(LI);
1866    }
1867  }
1868
1869  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1870  normalizeSpillWeights(RetNewLIs);
1871  return RetNewLIs;
1872}
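// Editor's sketch of a typical call site (hedged; the register allocator is
// the real client, and `cur`, `lis` and `vrm` are illustrative names):
//
//   SmallVector<LiveInterval*, 8> SpillIs;  // intervals being spilled now
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, SpillIs, loopInfo, *vrm);
//   // ...the returned intervals are then re-queued for allocation.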
1873
1874/// hasAllocatableSuperReg - Return true if the specified physical register has
1875/// any super register that's allocatable and has a live interval.
1876bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1877  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1878    if (allocatableRegs_[*AS] && hasInterval(*AS))
1879      return true;
1880  return false;
1881}
1882
1883/// getRepresentativeReg - Find the largest super register of the specified
1884/// physical register.
1885unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1886  // Find the largest super-register that is allocatable.
1887  unsigned BestReg = Reg;
1888  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1889    unsigned SuperReg = *AS;
1890    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1891      BestReg = SuperReg;
1892      break;
1893    }
1894  }
1895  return BestReg;
1896}
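// Editor's example (hypothetical, X86-flavored): if EAX carries a live
// interval but no allocatable super-register of EAX does, then
//
//   getRepresentativeReg(X86::AL) == X86::EAX
//
// i.e. the walk settles on the widest super-register that has an interval.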
1897
1898/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1899/// specified interval that conflict with the specified physical register.
1900unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1901                                                   unsigned PhysReg) const {
1902  unsigned NumConflicts = 0;
1903  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1904  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1905         E = mri_->reg_end(); I != E; ++I) {
1906    MachineOperand &O = I.getOperand();
1907    MachineInstr *MI = O.getParent();
1908    if (MI->isDebugValue())
1909      continue;
1910    SlotIndex Index = getInstructionIndex(MI);
1911    if (pli.liveAt(Index))
1912      ++NumConflicts;
1913  }
1914  return NumConflicts;
1915}
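// Editor's sketch (hypothetical caller; `CutoffThreshold` is not a real
// identifier): the count can gate whether evicting the physreg is worth it:
//
//   if (getNumConflictsWithPhysReg(li, PhysReg) <= CutoffThreshold)
//     spillPhysRegAroundRegDefsUses(li, PhysReg, vrm);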
1916
1917/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1918/// around all defs and uses of the specified interval. Return true if it
1919/// was able to cut its interval.
1920bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1921                                            unsigned PhysReg, VirtRegMap &vrm) {
1922  unsigned SpillReg = getRepresentativeReg(PhysReg);
1923
1924  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1925    // If a register aliases PhysReg but is not a sub-register of the
1926    // chosen representative super register, assert, since we can't
1927    // handle that case yet.
1928    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1929           tri_->isSuperRegister(*AS, SpillReg));
1930
1931  bool Cut = false;
1932  SmallVector<unsigned, 4> PRegs;
1933  if (hasInterval(SpillReg))
1934    PRegs.push_back(SpillReg);
1935  else {
1936    SmallSet<unsigned, 4> Added;
1937    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1938      if (Added.insert(*AS) && hasInterval(*AS)) {
1939        PRegs.push_back(*AS);
1940        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
1941          Added.insert(*ASS);
1942      }
1943  }
1944
1945  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1946  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1947         E = mri_->reg_end(); I != E; ++I) {
1948    MachineOperand &O = I.getOperand();
1949    MachineInstr *MI = O.getParent();
1950    if (MI->isDebugValue() || SeenMIs.count(MI))
1951      continue;
1952    SeenMIs.insert(MI);
1953    SlotIndex Index = getInstructionIndex(MI);
1954    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
1955      unsigned PReg = PRegs[i];
1956      LiveInterval &pli = getInterval(PReg);
1957      if (!pli.liveAt(Index))
1958        continue;
1959      vrm.addEmergencySpill(PReg, MI);
1960      SlotIndex StartIdx = Index.getLoadIndex();
1961      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
1962      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
1963        pli.removeRange(StartIdx, EndIdx);
1964        Cut = true;
1965      } else {
1966        std::string msg;
1967        raw_string_ostream Msg(msg);
1968        Msg << "Ran out of registers during register allocation!";
1969        if (MI->isInlineAsm()) {
1970          Msg << "\nPlease check your inline asm statement for invalid "
1971              << "constraints:\n";
1972          MI->print(Msg, tm_);
1973        }
1974        report_fatal_error(Msg.str());
1975      }
1976      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
1977        if (!hasInterval(*AS))
1978          continue;
1979        LiveInterval &spli = getInterval(*AS);
1980        if (spli.liveAt(Index))
1981          spli.removeRange(Index.getLoadIndex(),
1982                           Index.getNextIndex().getBaseIndex());
1983      }
1984    }
1985  }
1986  return Cut;
1987}
1988
1989LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1990                                                  MachineInstr* startInst) {
1991  LiveInterval& Interval = getOrCreateInterval(reg);
1992  VNInfo* VN = Interval.getNextValue(
1993    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
1994    startInst, getVNInfoAllocator());
1995  VN->setHasPHIKill(true);
1996  LiveRange LR(
1997     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
1998     getMBBEndIdx(startInst->getParent()), VN);
1999  Interval.addRange(LR);
2000
2001  return LR;
2002}
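// Editor's note (hedged): phi elimination is the expected client here,
// extending the value defined by an inserted copy to the end of its block:
//
//   LiveRange LR = LIs->addLiveRangeToEndOfBlock(CopyReg, CopyMI);
//
// where LIs, CopyReg and CopyMI are illustrative names, not identifiers
// from this file.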
2003
2004