// LiveIntervalAnalysis.cpp revision 0d8ccaa5c8db820b5b93f37e51563148c57ba6b8
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/CalcSpillWeights.h"
24#include "llvm/CodeGen/LiveVariables.h"
25#include "llvm/CodeGen/MachineFrameInfo.h"
26#include "llvm/CodeGen/MachineInstr.h"
27#include "llvm/CodeGen/MachineInstrBuilder.h"
28#include "llvm/CodeGen/MachineLoopInfo.h"
29#include "llvm/CodeGen/MachineMemOperand.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/Passes.h"
32#include "llvm/CodeGen/ProcessImplicitDefs.h"
33#include "llvm/Target/TargetRegisterInfo.h"
34#include "llvm/Target/TargetInstrInfo.h"
35#include "llvm/Target/TargetMachine.h"
36#include "llvm/Target/TargetOptions.h"
37#include "llvm/Support/CommandLine.h"
38#include "llvm/Support/Debug.h"
39#include "llvm/Support/ErrorHandling.h"
40#include "llvm/Support/raw_ostream.h"
41#include "llvm/ADT/DepthFirstIterator.h"
42#include "llvm/ADT/SmallSet.h"
43#include "llvm/ADT/Statistic.h"
44#include "llvm/ADT/STLExtras.h"
45#include <algorithm>
46#include <limits>
47#include <cmath>
48using namespace llvm;
49
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Pass identification and registration. The dependencies listed here mirror
// the ones declared in getAnalysisUsage below.
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
70
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  // When StrongPHIElim is enabled it does its own PHI lowering, so the
  // standard PHIElimination pass is only required without it.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: users of LiveIntervals also need SlotIndexes to stay alive.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
93
94void LiveIntervals::releaseMemory() {
95  // Free the live intervals themselves.
96  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
97       E = r2iMap_.end(); I != E; ++I)
98    delete I->second;
99
100  r2iMap_.clear();
101
102  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
103  VNInfoAllocator.Reset();
104  while (!CloneMIs.empty()) {
105    MachineInstr *MI = CloneMIs.back();
106    CloneMIs.pop_back();
107    mf_->DeleteMachineInstr(MI);
108  }
109}
110
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function, register info, and target hooks used throughout the
  // interval computation.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Build the live intervals for all virtual and physical registers.
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  // Intervals were materialized into pass state; report the IR as modified.
  return true;
}
131
132/// print - Implement the dump method.
133void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
134  OS << "********** INTERVALS **********\n";
135  for (const_iterator I = begin(), E = end(); I != E; ++I) {
136    I->second->print(OS, tri_);
137    OS << "\n";
138  }
139
140  printInstrs(OS);
141}
142
143void LiveIntervals::printInstrs(raw_ostream &OS) const {
144  OS << "********** MACHINEINSTRS **********\n";
145  mf_->print(OS, indexes_);
146}
147
148void LiveIntervals::dumpInstrs() const {
149  printInstrs(dbgs());
150}
151
/// conflictsWithPhysReg - Return true if assigning the physical register
/// 'reg' would conflict with an operand of some instruction inside li's
/// (single) live range. Conservatively returns true for anything it does not
/// analyze, e.g. intervals with more than one range.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  // The entire range maps to deleted instructions: nothing to conflict with.
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Walk every instruction in [firstMI, lastMI] looking for an operand whose
  // (mapped) physical register overlaps 'reg'.
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      // Map virtual register operands through the current assignment; an
      // unassigned vreg cannot conflict yet.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
217
/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges (other than the copies in JoinedCopies) references a physical
/// register that overlaps Reg.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Walk every base index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      // Copies that coalescing has already joined will be deleted; ignore.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        // Only distinct physical registers can alias-conflict with Reg.
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
248
249static
250bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
251  unsigned Reg = MI.getOperand(MOIdx).getReg();
252  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
253    const MachineOperand &MO = MI.getOperand(i);
254    if (!MO.isReg())
255      continue;
256    if (MO.getReg() == Reg && MO.isDef()) {
257      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
258             MI.getOperand(MOIdx).getSubReg() &&
259             (MO.getSubReg() || MO.isImplicit()));
260      return true;
261    }
262  }
263  return false;
264}
265
266/// isPartialRedef - Return true if the specified def at the specific index is
267/// partially re-defining the specified live interval. A common case of this is
268/// a definition of the sub-register.
269bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
270                                   LiveInterval &interval) {
271  if (!MO.getSubReg() || MO.isEarlyClobber())
272    return false;
273
274  SlotIndex RedefIndex = MIIdx.getDefIndex();
275  const LiveRange *OldLR =
276    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
277  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
278  if (DefMI != 0) {
279    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
280  }
281  return false;
282}
283
/// handleVirtualRegisterDef - Extend 'interval' for the definition of a
/// virtual register by operand MO (operand number MOIdx) of instruction mi at
/// slot MIIdx. The first def of a vreg builds its whole live range from
/// LiveVariables kill information; later defs (two-address redefs, partial
/// sub-register redefs, phi-join revivals) split or extend existing ranges.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // Defined and killed by the same instruction: a dead def. Use the
        // store slot so the range below is non-empty.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
481
/// handlePhysicalRegisterDef - Add a live range for a physical register def
/// by operand MO of instruction mi at slot MIIdx. The range is scanned
/// forward within the defining block until a kill or redef is found; CopyMI,
/// if non-null, records the copy instruction that produced the value.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // DBG_VALUEs have no slot index; skip them without advancing baseIndex.
    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
566
567void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
568                                      MachineBasicBlock::iterator MI,
569                                      SlotIndex MIIdx,
570                                      MachineOperand& MO,
571                                      unsigned MOIdx) {
572  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
573    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
574                             getOrCreateInterval(MO.getReg()));
575  else if (allocatableRegs_[MO.getReg()]) {
576    MachineInstr *CopyMI = NULL;
577    if (MI->isCopyLike())
578      CopyMI = MI;
579    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580                              getOrCreateInterval(MO.getReg()), CopyMI);
581    // Def of a register also defines its sub-registers.
582    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
583      // If MI also modifies the sub-register explicitly, avoid processing it
584      // more than once. Do not pass in TRI here so it checks for exact match.
585      if (!MI->definesRegister(*AS))
586        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587                                  getOrCreateInterval(*AS), 0);
588  }
589}
590
/// handleLiveInRegister - Create a PHI-def-valued live range for a register
/// that is live-in to MBB, extending from the block start to its first kill
/// or redefinition (or through the whole block if neither is found). isAlias
/// indicates the interval is for a sub-register alias of the real live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  // Scan forward until the register is killed or redefined.
  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // An alias interval gets only a minimal dead range.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The value is considered defined at the block entry, i.e. a PHI def with
  // no actual defining instruction.
  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
663
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs have no slot index; do not advance MIIndex for them.
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      // Operands are visited in reverse so that explicit defs are processed
      // after any implicit ones on the same instruction.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
732
733LiveInterval* LiveIntervals::createInterval(unsigned reg) {
734  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
735  return new LiveInterval(reg, Weight);
736}
737
738/// dupInterval - Duplicate a live interval. The caller is responsible for
739/// managing the allocated memory.
740LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
741  LiveInterval *NewLI = createInterval(li->reg);
742  NewLI->Copy(*li, mri_, getVNInfoAllocator());
743  return NewLI;
744}
745
746/// shrinkToUses - After removing some uses of a register, shrink its live
747/// range to just the remaining uses. This method does not compute reaching
748/// defs for new uses, and it doesn't remove dead defs.
749void LiveIntervals::shrinkToUses(LiveInterval *li,
750                                 SmallVectorImpl<MachineInstr*> *dead) {
751  DEBUG(dbgs() << "Shrink: " << *li << '\n');
752  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
753         && "Can't only shrink physical registers");
754  // Find all the values used, including PHI kills.
755  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
756
757  // Visit all instructions reading li->reg.
758  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
759       MachineInstr *UseMI = I.skipInstruction();) {
760    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
761      continue;
762    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
763    VNInfo *VNI = li->getVNInfoAt(Idx);
764    assert(VNI && "Live interval not live into reading instruction");
765    if (VNI->def == Idx) {
766      // Special case: An early-clobber tied operand reads and writes the
767      // register one slot early.
768      Idx = Idx.getPrevSlot();
769      VNI = li->getVNInfoAt(Idx);
770      assert(VNI && "Early-clobber tied value not available");
771    }
772    WorkList.push_back(std::make_pair(Idx, VNI));
773  }
774
775  // Create a new live interval with only minimal live segments per def.
776  LiveInterval NewLI(li->reg, 0);
777  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
778       I != E; ++I) {
779    VNInfo *VNI = *I;
780    if (VNI->isUnused())
781      continue;
782    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
783
784    // A use tied to an early-clobber def ends at the load slot and isn't caught
785    // above. Catch it here instead. This probably only ever happens for inline
786    // assembly.
787    if (VNI->def.isUse())
788      if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
789        WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
790  }
791
792  // Keep track of the PHIs that are in use.
793  SmallPtrSet<VNInfo*, 8> UsedPHIs;
794
795  // Extend intervals to reach all uses in WorkList.
796  while (!WorkList.empty()) {
797    SlotIndex Idx = WorkList.back().first;
798    VNInfo *VNI = WorkList.back().second;
799    WorkList.pop_back();
800    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
801    SlotIndex BlockStart = getMBBStartIdx(MBB);
802
803    // Extend the live range for VNI to be live at Idx.
804    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
805      (void)ExtVNI;
806      assert(ExtVNI == VNI && "Unexpected existing value number");
807      // Is this a PHIDef we haven't seen before?
808      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
809        continue;
810      // The PHI is live, make sure the predecessors are live-out.
811      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
812           PE = MBB->pred_end(); PI != PE; ++PI) {
813        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
814        VNInfo *PVNI = li->getVNInfoAt(Stop);
815        // A predecessor is not required to have a live-out value for a PHI.
816        if (PVNI) {
817          assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
818          WorkList.push_back(std::make_pair(Stop, PVNI));
819        }
820      }
821      continue;
822    }
823
824    // VNI is live-in to MBB.
825    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
826    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
827
828    // Make sure VNI is live-out from the predecessors.
829    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
830         PE = MBB->pred_end(); PI != PE; ++PI) {
831      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
832      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
833      WorkList.push_back(std::make_pair(Stop, VNI));
834    }
835  }
836
837  // Handle dead values.
838  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
839       I != E; ++I) {
840    VNInfo *VNI = *I;
841    if (VNI->isUnused())
842      continue;
843    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
844    assert(LII != NewLI.end() && "Missing live range for PHI");
845    if (LII->end != VNI->def.getNextSlot())
846      continue;
847    if (VNI->isPHIDef()) {
848      // This is a dead PHI. Remove it.
849      VNI->setIsUnused(true);
850      NewLI.removeRange(*LII);
851    } else {
852      // This is a dead def. Make sure the instruction knows.
853      MachineInstr *MI = getInstructionFromIndex(VNI->def);
854      assert(MI && "No instruction defining live value");
855      MI->addRegisterDead(li->reg, tri_);
856      if (dead && MI->allDefsAreDead()) {
857        DEBUG(dbgs() << "All defs dead: " << *MI);
858        dead->push_back(MI);
859      }
860    }
861  }
862
863  // Move the trimmed ranges back.
864  li->ranges.swap(NewLI.ranges);
865  DEBUG(dbgs() << "Shrink: " << *li << '\n');
866}
867
868
869//===----------------------------------------------------------------------===//
870// Register allocator hooks.
871//
872
873MachineBasicBlock::iterator
874LiveIntervals::getLastSplitPoint(const LiveInterval &li,
875                                 MachineBasicBlock *mbb) const {
876  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
877
878  // If li is not live into a landing pad, we can insert spill code before the
879  // first terminator.
880  if (!lpad || !isLiveInToMBB(li, lpad))
881    return mbb->getFirstTerminator();
882
883  // When there is a landing pad, spill code must go before the call instruction
884  // that can throw.
885  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
886  while (I != B) {
887    --I;
888    if (I->getDesc().isCall())
889      return I;
890  }
891  // The block contains no calls that can throw, so use the first terminator.
892  return mbb->getFirstTerminator();
893}
894
895void LiveIntervals::addKillFlags() {
896  for (iterator I = begin(), E = end(); I != E; ++I) {
897    unsigned Reg = I->first;
898    if (TargetRegisterInfo::isPhysicalRegister(Reg))
899      continue;
900    if (mri_->reg_nodbg_empty(Reg))
901      continue;
902    LiveInterval *LI = I->second;
903
904    // Every instruction that kills Reg corresponds to a live range end point.
905    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
906         ++RI) {
907      // A LOAD index indicates an MBB edge.
908      if (RI->end.isLoad())
909        continue;
910      MachineInstr *MI = getInstructionFromIndex(RI->end);
911      if (!MI)
912        continue;
913      MI->addRegisterKilled(Reg, NULL);
914    }
915  }
916}
917
918/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
919/// allow one) virtual register operand, then its uses are implicitly using
920/// the register. Returns the virtual register.
921unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
922                                            MachineInstr *MI) const {
923  unsigned RegOp = 0;
924  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
925    MachineOperand &MO = MI->getOperand(i);
926    if (!MO.isReg() || !MO.isUse())
927      continue;
928    unsigned Reg = MO.getReg();
929    if (Reg == 0 || Reg == li.reg)
930      continue;
931
932    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
933        !allocatableRegs_[Reg])
934      continue;
935    // FIXME: For now, only remat MI with at most one register operand.
936    assert(!RegOp &&
937           "Can't rematerialize instruction with multiple register operand!");
938    RegOp = MO.getReg();
939#ifndef NDEBUG
940    break;
941#endif
942  }
943  return RegOp;
944}
945
946/// isValNoAvailableAt - Return true if the val# of the specified interval
947/// which reaches the given instruction also reaches the specified use index.
948bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
949                                       SlotIndex UseIdx) const {
950  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
951  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
952}
953
954/// isReMaterializable - Returns true if the definition MI of the specified
955/// val# of the specified interval is re-materializable.
956bool
957LiveIntervals::isReMaterializable(const LiveInterval &li,
958                                  const VNInfo *ValNo, MachineInstr *MI,
959                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
960                                  bool &isLoad) {
961  if (DisableReMat)
962    return false;
963
964  if (!tii_->isTriviallyReMaterializable(MI, aa_))
965    return false;
966
967  // Target-specific code can mark an instruction as being rematerializable
968  // if it has one virtual reg use, though it had better be something like
969  // a PIC base register which is likely to be live everywhere.
970  unsigned ImpUse = getReMatImplicitUse(li, MI);
971  if (ImpUse) {
972    const LiveInterval &ImpLi = getInterval(ImpUse);
973    for (MachineRegisterInfo::use_nodbg_iterator
974           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
975         ri != re; ++ri) {
976      MachineInstr *UseMI = &*ri;
977      SlotIndex UseIdx = getInstructionIndex(UseMI);
978      if (li.getVNInfoAt(UseIdx) != ValNo)
979        continue;
980      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
981        return false;
982    }
983
984    // If a register operand of the re-materialized instruction is going to
985    // be spilled next, then it's not legal to re-materialize this instruction.
986    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
987      if (ImpUse == SpillIs[i]->reg)
988        return false;
989  }
990  return true;
991}
992
993/// isReMaterializable - Returns true if the definition MI of the specified
994/// val# of the specified interval is re-materializable.
995bool LiveIntervals::isReMaterializable(const LiveInterval &li,
996                                       const VNInfo *ValNo, MachineInstr *MI) {
997  SmallVector<LiveInterval*, 4> Dummy1;
998  bool Dummy2;
999  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1000}
1001
1002/// isReMaterializable - Returns true if every definition of MI of every
1003/// val# of the specified interval is re-materializable.
1004bool
1005LiveIntervals::isReMaterializable(const LiveInterval &li,
1006                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
1007                                  bool &isLoad) {
1008  isLoad = false;
1009  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1010       i != e; ++i) {
1011    const VNInfo *VNI = *i;
1012    if (VNI->isUnused())
1013      continue; // Dead val#.
1014    // Is the def for the val# rematerializable?
1015    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1016    if (!ReMatDefMI)
1017      return false;
1018    bool DefIsLoad = false;
1019    if (!ReMatDefMI ||
1020        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1021      return false;
1022    isLoad |= DefIsLoad;
1023  }
1024  return true;
1025}
1026
1027/// FilterFoldedOps - Filter out two-address use operands. Return
1028/// true if it finds any issue with the operands that ought to prevent
1029/// folding.
1030static bool FilterFoldedOps(MachineInstr *MI,
1031                            SmallVector<unsigned, 2> &Ops,
1032                            unsigned &MRInfo,
1033                            SmallVector<unsigned, 2> &FoldOps) {
1034  MRInfo = 0;
1035  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1036    unsigned OpIdx = Ops[i];
1037    MachineOperand &MO = MI->getOperand(OpIdx);
1038    // FIXME: fold subreg use.
1039    if (MO.getSubReg())
1040      return true;
1041    if (MO.isDef())
1042      MRInfo |= (unsigned)VirtRegMap::isMod;
1043    else {
1044      // Filter out two-address use operand(s).
1045      if (MI->isRegTiedToDefOperand(OpIdx)) {
1046        MRInfo = VirtRegMap::isModRef;
1047        continue;
1048      }
1049      MRInfo |= (unsigned)VirtRegMap::isRef;
1050    }
1051    FoldOps.push_back(OpIdx);
1052  }
1053  return false;
1054}
1055
1056
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
///
/// \param MI    Instruction to fold into; on success it is replaced with the
///              folded instruction.
/// \param isSS  True: fold the stack slot Slot; false: fold DefMI
///              (a rematerialized definition).
/// \param Reg   The virtual register being spilled / restored.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot or the remat'ed def into
  // the listed operands.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer the bookkeeping attached to the old instruction to the new
    // folded one before swapping them in the index maps.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
1108
1109/// canFoldMemoryOperand - Returns true if the specified load / store
1110/// folding is possible.
1111bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1112                                         SmallVector<unsigned, 2> &Ops,
1113                                         bool ReMat) const {
1114  // Filter the list of operand indexes that are to be folded. Abort if
1115  // any operand will prevent folding.
1116  unsigned MRInfo = 0;
1117  SmallVector<unsigned, 2> FoldOps;
1118  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1119    return false;
1120
1121  // It's only legal to remat for a use, not a def.
1122  if (ReMat && (MRInfo & VirtRegMap::isMod))
1123    return false;
1124
1125  return tii_->canFoldMemoryOperand(MI, FoldOps);
1126}
1127
1128bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1129  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1130
1131  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
1132
1133  if (mbb == 0)
1134    return false;
1135
1136  for (++itr; itr != li.ranges.end(); ++itr) {
1137    MachineBasicBlock *mbb2 =
1138      indexes_->getMBBCoveringRange(itr->start, itr->end);
1139
1140    if (mbb2 != mbb)
1141      return false;
1142  }
1143
1144  return true;
1145}
1146
1147/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1148/// interval on to-be re-materialized operands of MI) with new register.
1149void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1150                                       MachineInstr *MI, unsigned NewVReg,
1151                                       VirtRegMap &vrm) {
1152  // There is an implicit use. That means one of the other operand is
1153  // being remat'ed and the remat'ed instruction has li.reg as an
1154  // use operand. Make sure we rewrite that as well.
1155  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1156    MachineOperand &MO = MI->getOperand(i);
1157    if (!MO.isReg())
1158      continue;
1159    unsigned Reg = MO.getReg();
1160    if (!TargetRegisterInfo::isVirtualRegister(Reg))
1161      continue;
1162    if (!vrm.isReMaterialized(Reg))
1163      continue;
1164    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1165    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1166    if (UseMO)
1167      UseMO->setReg(NewVReg);
1168  }
1169}
1170
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites every operand of MI that refers to li.reg to use NewVReg
/// (creating it on demand), attempts load/store folding where permitted, and
/// builds the spill interval ranges in the new vreg's LiveInterval. Returns
/// the CanFold status consumed by the caller's AllCanFold accumulator.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    // Only operands that name the virtual register being spilled matter.
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If folding deleted MI from the index maps, there is nothing
          // left to rescan.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the reload range covers [load, def) of this index.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
1374bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1375                                   const VNInfo *VNI,
1376                                   MachineBasicBlock *MBB,
1377                                   SlotIndex Idx) const {
1378  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1379}
1380
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;   // Slot index of MI.
    MachineInstr *MI;  // The instruction to rewrite.
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  // Orders RewriteInfo entries by ascending slot index, so the rewrite
  // worklist can be sorted into instruction-index order.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1396
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside the
/// live range *I, delegating per-instruction work to
/// rewriteInstructionForSpills, and record per-MBB spill (SpillIdxes /
/// SpillMBBs) and restore (RestoreIdxes / RestoreMBBs) points for the caller.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole instruction (base) indexes.
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before possibly erasing MI below.
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      // No frame-index DBG_VALUE could be emitted; drop the debug info.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only rewrite instructions inside this live range.
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // When the def is remat'ed with a virtual register operand, that operand is
  // an implicit use at every rewritten instruction.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already created for this MBB if any.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing rewritten (e.g. everything was folded away); move on.
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def in the MBB: a spill is needed here.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  // Give the last interval a low spill weight too, if everything folded.
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1627
1628bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1629                        unsigned vr, BitVector &RestoreMBBs,
1630                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1631  if (!RestoreMBBs[Id])
1632    return false;
1633  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1634  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1635    if (Restores[i].index == index &&
1636        Restores[i].vreg == vr &&
1637        Restores[i].canFold)
1638      return true;
1639  return false;
1640}
1641
1642void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1643                        unsigned vr, BitVector &RestoreMBBs,
1644                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1645  if (!RestoreMBBs[Id])
1646    return;
1647  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1648  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1649    if (Restores[i].index == index && Restores[i].vreg)
1650      Restores[i].index = SlotIndex();
1651}
1652
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
///
/// Walks every remaining operand that still references li.reg after
/// rewriting: IMPLICIT_DEF defs are deleted outright, debug values are
/// dropped, and each leftover use is redirected to a fresh undef vreg with
/// its own empty interval (appended to NewLIs).
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance before we possibly erase MI or rewrite this operand's
    // register; either would invalidate the current iterator position.
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      // The only defs left at this point must be IMPLICIT_DEFs; a real def
      // would have been rewritten to a new vreg by the spilling code.
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that referenced the spilled register to
      // the new vreg, marking each as undef since no value is defined.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1694
1695float
1696LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1697  // Limit the loop depth ridiculousness.
1698  if (loopDepth > 200)
1699    loopDepth = 200;
1700
1701  // The loop depth is used to roughly estimate the number of times the
1702  // instruction is executed. Something like 10^d is simple, but will quickly
1703  // overflow a float. This expression behaves like 10^d for small d, but is
1704  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1705  // headroom before overflow.
1706  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1707
1708  return (isDef + isUse) * lc;
1709}
1710
1711static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1712  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1713    NewLIs[i]->weight =
1714      normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
1715}
1716
/// addIntervalsForSpills - Spill the live interval li, rewriting all of its
/// defs and uses to new split intervals, and return the new intervals.
/// Rematerializes values where possible, records per-block spill/restore
/// points, and tries to fold stores/loads into the instructions themselves.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Non-pre-split path. Only intervals spanning multiple blocks are worth
  // splitting into per-block pieces.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // Phase 1: classify each value number — rematerializable (clone its def)
  // or not (forces a stack slot).
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Phase 2: create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Phase 3: emit the per-block spill points recorded during rewriting,
  // folding each store into its defining instruction when possible.
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          // Collect all operands of MI that reference VReg; folding must
          // cover all of them at once.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Phase 4: emit the recorded restore points, folding loads (or
  // rematerializable defs) into the using instruction when possible.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // A default-constructed index marks an entry erased by
      // eraseRestoreInfo (its load was folded with the store).
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        // Mark the last use of this new interval as a kill, unless it is
        // tied to a def (two-address), in which case the value lives on.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
2031
2032/// hasAllocatableSuperReg - Return true if the specified physical register has
2033/// any super register that's allocatable.
2034bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2035  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2036    if (allocatableRegs_[*AS] && hasInterval(*AS))
2037      return true;
2038  return false;
2039}
2040
2041/// getRepresentativeReg - Find the largest super register of the specified
2042/// physical register.
2043unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2044  // Find the largest super-register that is allocatable.
2045  unsigned BestReg = Reg;
2046  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2047    unsigned SuperReg = *AS;
2048    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2049      BestReg = SuperReg;
2050      break;
2051    }
2052  }
2053  return BestReg;
2054}
2055
2056/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2057/// specified interval that conflicts with the specified physical register.
2058unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2059                                                   unsigned PhysReg) const {
2060  unsigned NumConflicts = 0;
2061  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2062  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2063         E = mri_->reg_end(); I != E; ++I) {
2064    MachineOperand &O = I.getOperand();
2065    MachineInstr *MI = O.getParent();
2066    if (MI->isDebugValue())
2067      continue;
2068    SlotIndex Index = getInstructionIndex(MI);
2069    if (pli.liveAt(Index))
2070      ++NumConflicts;
2071  }
2072  return NumConflicts;
2073}
2074
2075/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2076/// around all defs and uses of the specified interval. Return true if it
2077/// was able to cut its interval.
2078bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2079                                            unsigned PhysReg, VirtRegMap &vrm) {
2080  unsigned SpillReg = getRepresentativeReg(PhysReg);
2081
2082  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2083               << " represented by " << tri_->getName(SpillReg) << '\n');
2084
2085  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2086    // If there are registers which alias PhysReg, but which are not a
2087    // sub-register of the chosen representative super register. Assert
2088    // since we can't handle it yet.
2089    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2090           tri_->isSuperRegister(*AS, SpillReg));
2091
2092  bool Cut = false;
2093  SmallVector<unsigned, 4> PRegs;
2094  if (hasInterval(SpillReg))
2095    PRegs.push_back(SpillReg);
2096  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2097    if (hasInterval(*SR))
2098      PRegs.push_back(*SR);
2099
2100  DEBUG({
2101    dbgs() << "Trying to spill:";
2102    for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2103      dbgs() << ' ' << tri_->getName(PRegs[i]);
2104    dbgs() << '\n';
2105  });
2106
2107  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2108  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2109         E = mri_->reg_end(); I != E; ++I) {
2110    MachineOperand &O = I.getOperand();
2111    MachineInstr *MI = O.getParent();
2112    if (MI->isDebugValue() || SeenMIs.count(MI))
2113      continue;
2114    SeenMIs.insert(MI);
2115    SlotIndex Index = getInstructionIndex(MI);
2116    bool LiveReg = false;
2117    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2118      unsigned PReg = PRegs[i];
2119      LiveInterval &pli = getInterval(PReg);
2120      if (!pli.liveAt(Index))
2121        continue;
2122      LiveReg = true;
2123      SlotIndex StartIdx = Index.getLoadIndex();
2124      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2125      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2126        std::string msg;
2127        raw_string_ostream Msg(msg);
2128        Msg << "Ran out of registers during register allocation!";
2129        if (MI->isInlineAsm()) {
2130          Msg << "\nPlease check your inline asm statement for invalid "
2131              << "constraints:\n";
2132          MI->print(Msg, tm_);
2133        }
2134        report_fatal_error(Msg.str());
2135      }
2136      pli.removeRange(StartIdx, EndIdx);
2137      LiveReg = true;
2138    }
2139    if (!LiveReg)
2140      continue;
2141    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2142    vrm.addEmergencySpill(SpillReg, MI);
2143    Cut = true;
2144  }
2145  return Cut;
2146}
2147
2148LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2149                                                  MachineInstr* startInst) {
2150  LiveInterval& Interval = getOrCreateInterval(reg);
2151  VNInfo* VN = Interval.getNextValue(
2152    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2153    startInst, getVNInfoAllocator());
2154  VN->setHasPHIKill(true);
2155  LiveRange LR(
2156     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2157     getMBBEndIdx(startInst->getParent()), VN);
2158  Interval.addRange(LR);
2159
2160  return LR;
2161}
2162
2163