LiveIntervalAnalysis.cpp revision 4662a9f270fe2c916c35545718720ed181384c30
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/CalcSpillWeights.h"
24#include "llvm/CodeGen/LiveVariables.h"
25#include "llvm/CodeGen/MachineFrameInfo.h"
26#include "llvm/CodeGen/MachineInstr.h"
27#include "llvm/CodeGen/MachineInstrBuilder.h"
28#include "llvm/CodeGen/MachineLoopInfo.h"
29#include "llvm/CodeGen/MachineMemOperand.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/Passes.h"
32#include "llvm/CodeGen/ProcessImplicitDefs.h"
33#include "llvm/Target/TargetRegisterInfo.h"
34#include "llvm/Target/TargetInstrInfo.h"
35#include "llvm/Target/TargetMachine.h"
36#include "llvm/Target/TargetOptions.h"
37#include "llvm/Support/CommandLine.h"
38#include "llvm/Support/Debug.h"
39#include "llvm/Support/ErrorHandling.h"
40#include "llvm/Support/raw_ostream.h"
41#include "llvm/ADT/DepthFirstIterator.h"
42#include "llvm/ADT/SmallSet.h"
43#include "llvm/ADT/Statistic.h"
44#include "llvm/ADT/STLExtras.h"
45#include <algorithm>
46#include <limits>
47#include <cmath>
48using namespace llvm;
49
50// Hidden options for help debugging.
51static cl::opt<bool> DisableReMat("disable-rematerialization",
52                                  cl::init(false), cl::Hidden);
53
54STATISTIC(numIntervals , "Number of original intervals");
55STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
56STATISTIC(numSplits    , "Number of intervals split");
57
58char LiveIntervals::ID = 0;
59INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
60                "Live Interval Analysis", false, false)
61INITIALIZE_PASS_DEPENDENCY(LiveVariables)
62INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
63INITIALIZE_PASS_DEPENDENCY(PHIElimination)
64INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
65INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
66INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
67INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
68INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
69                "Live Interval Analysis", false, false)
70
/// getAnalysisUsage - Declare which analyses this pass requires and which it
/// keeps up to date. LiveIntervals never changes the CFG, so CFG-derived
/// analyses are preserved.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  // The classic PHI elimination pass is only requested when the strong PHI
  // elimination path is disabled.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: any pass that uses LiveIntervals also needs SlotIndexes to
  // remain alive, since the intervals are expressed in slot indexes.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
93
94void LiveIntervals::releaseMemory() {
95  // Free the live intervals themselves.
96  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
97       E = r2iMap_.end(); I != E; ++I)
98    delete I->second;
99
100  r2iMap_.clear();
101
102  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
103  VNInfoAllocator.Reset();
104  while (!CloneMIs.empty()) {
105    MachineInstr *MI = CloneMIs.back();
106    CloneMIs.pop_back();
107    mf_->DeleteMachineInstr(MI);
108  }
109}
110
/// runOnMachineFunction - Compute live intervals for all virtual and
/// physical registers in the function. (Despite the historical comment,
/// this pass only builds the intervals; allocation happens later.)
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to the function, target hooks, and required analyses so
  // the rest of the pass can use them without repeated lookups.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  // Interval computation may add implicit operands to instructions (see
  // handleVirtualRegisterDef), so report the function as modified.
  return true;
}
131
132/// print - Implement the dump method.
133void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
134  OS << "********** INTERVALS **********\n";
135  for (const_iterator I = begin(), E = end(); I != E; ++I) {
136    I->second->print(OS, tri_);
137    OS << "\n";
138  }
139
140  printInstrs(OS);
141}
142
/// printInstrs - Print the function's machine instructions, annotated with
/// their slot indexes, to the given stream.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}
147
/// dumpInstrs - Print the annotated machine instructions to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
151
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// to the virtual register of 'li' would conflict: either the interval is
/// too complex to analyze (multiple ranges, crosses a block boundary or a
/// terminator), or some instruction inside the range references a register
/// overlapping 'reg'. Copies to/from li.reg itself are tolerated. Returns
/// false when the whole range maps to deleted instructions.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  // Round the range down/up to whole instruction (base) indexes.
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Scan every instruction from firstMI through lastMI inclusive.
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      // A virtual register operand conflicts through its assigned physreg,
      // if it has been given one already.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
217
/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges has a physical-register operand (other than Reg itself) that
/// overlaps Reg. Instructions in JoinedCopies (copies already scheduled for
/// coalescing) and deleted instructions are ignored.
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Visit every instruction (base) index covered by this range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      // Copies that are being coalesced away cannot cause a conflict.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        // Only other physical registers can alias Reg.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
248
249static
250bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
251  unsigned Reg = MI.getOperand(MOIdx).getReg();
252  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
253    const MachineOperand &MO = MI.getOperand(i);
254    if (!MO.isReg())
255      continue;
256    if (MO.getReg() == Reg && MO.isDef()) {
257      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
258             MI.getOperand(MOIdx).getSubReg() &&
259             (MO.getSubReg() || MO.isImplicit()));
260      return true;
261    }
262  }
263  return false;
264}
265
266/// isPartialRedef - Return true if the specified def at the specific index is
267/// partially re-defining the specified live interval. A common case of this is
268/// a definition of the sub-register.
269bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
270                                   LiveInterval &interval) {
271  if (!MO.getSubReg() || MO.isEarlyClobber())
272    return false;
273
274  SlotIndex RedefIndex = MIIdx.getDefIndex();
275  const LiveRange *OldLR =
276    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
277  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
278  if (DefMI != 0) {
279    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
280  }
281  return false;
282}
283
/// handleVirtualRegisterDef - Update 'interval' for the virtual-register
/// definition at operand MOIdx of instruction 'mi' (indexed by MIIdx). The
/// first def seen for a vreg builds the entire interval from LiveVariables'
/// kill and alive-block information; later defs of the same vreg (from phi
/// elimination, two-address rewriting, or partial sub-register redefs) split
/// the existing range with a new value number.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    // Remember if the defining instruction is some kind of copy, so the
    // value number can carry that information.
    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
481
/// handlePhysicalRegisterDef - Add a live range to 'interval' for the
/// physical-register def at MIIdx. The range runs from the def slot to the
/// killing use, to a redef, or (when dead or never read again) just past the
/// def slot. CopyMI, if non-null, is recorded on a newly created value
/// number as the defining copy.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  // Walk the instructions after 'mi', keeping baseIndex in step.
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
566
567void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
568                                      MachineBasicBlock::iterator MI,
569                                      SlotIndex MIIdx,
570                                      MachineOperand& MO,
571                                      unsigned MOIdx) {
572  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
573    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
574                             getOrCreateInterval(MO.getReg()));
575  else {
576    MachineInstr *CopyMI = NULL;
577    if (MI->isCopyLike())
578      CopyMI = MI;
579    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580                              getOrCreateInterval(MO.getReg()), CopyMI);
581    // Def of a register also defines its sub-registers.
582    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
583      // If MI also modifies the sub-register explicitly, avoid processing it
584      // more than once. Do not pass in TRI here so it checks for exact match.
585      if (!MI->definesRegister(*AS))
586        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587                                  getOrCreateInterval(*AS), 0);
588  }
589}
590
/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB. The value gets a PHI-def value number at the block start; the
/// range ends at the first kill, at a redef (dead), or — if no use or def is
/// found — at the last scanned index (live through), or just past MIIdx when
/// the live-in was only added for an aliasing register (isAlias).
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  // Scan the block, advancing 'mi' and 'baseIndex' in lockstep.
  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The live-in value is defined "by the CFG" at the block start, so give it
  // a PHI-def value number there.
  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
663
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live. Walks every block: first the block's live-in
/// registers, then every register def in every instruction.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      // NOTE(review): operands are visited in reverse order, presumably so
      // trailing implicit operands are processed first — confirm intent.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
732
733LiveInterval* LiveIntervals::createInterval(unsigned reg) {
734  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
735  return new LiveInterval(reg, Weight);
736}
737
738/// dupInterval - Duplicate a live interval. The caller is responsible for
739/// managing the allocated memory.
740LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
741  LiveInterval *NewLI = createInterval(li->reg);
742  NewLI->Copy(*li, mri_, getVNInfoAllocator());
743  return NewLI;
744}
745
746/// shrinkToUses - After removing some uses of a register, shrink its live
747/// range to just the remaining uses. This method does not compute reaching
748/// defs for new uses, and it doesn't remove dead defs.
749bool LiveIntervals::shrinkToUses(LiveInterval *li,
750                                 SmallVectorImpl<MachineInstr*> *dead) {
751  DEBUG(dbgs() << "Shrink: " << *li << '\n');
752  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
753         && "Can't only shrink physical registers");
754  // Find all the values used, including PHI kills.
755  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
756
757  // Visit all instructions reading li->reg.
758  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
759       MachineInstr *UseMI = I.skipInstruction();) {
760    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
761      continue;
762    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
763    VNInfo *VNI = li->getVNInfoAt(Idx);
764    if (!VNI) {
765      // This shouldn't happen: readsVirtualRegister returns true, but there is
766      // no live value. It is likely caused by a target getting <undef> flags
767      // wrong.
768      DEBUG(dbgs() << Idx << '\t' << *UseMI
769                   << "Warning: Instr claims to read non-existent value in "
770                    << *li << '\n');
771      continue;
772    }
773    if (VNI->def == Idx) {
774      // Special case: An early-clobber tied operand reads and writes the
775      // register one slot early.
776      Idx = Idx.getPrevSlot();
777      VNI = li->getVNInfoAt(Idx);
778      assert(VNI && "Early-clobber tied value not available");
779    }
780    WorkList.push_back(std::make_pair(Idx, VNI));
781  }
782
783  // Create a new live interval with only minimal live segments per def.
784  LiveInterval NewLI(li->reg, 0);
785  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
786       I != E; ++I) {
787    VNInfo *VNI = *I;
788    if (VNI->isUnused())
789      continue;
790    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
791
792    // A use tied to an early-clobber def ends at the load slot and isn't caught
793    // above. Catch it here instead. This probably only ever happens for inline
794    // assembly.
795    if (VNI->def.isUse())
796      if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
797        WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
798  }
799
800  // Keep track of the PHIs that are in use.
801  SmallPtrSet<VNInfo*, 8> UsedPHIs;
802
803  // Extend intervals to reach all uses in WorkList.
804  while (!WorkList.empty()) {
805    SlotIndex Idx = WorkList.back().first;
806    VNInfo *VNI = WorkList.back().second;
807    WorkList.pop_back();
808    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
809    SlotIndex BlockStart = getMBBStartIdx(MBB);
810
811    // Extend the live range for VNI to be live at Idx.
812    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
813      (void)ExtVNI;
814      assert(ExtVNI == VNI && "Unexpected existing value number");
815      // Is this a PHIDef we haven't seen before?
816      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
817        continue;
818      // The PHI is live, make sure the predecessors are live-out.
819      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
820           PE = MBB->pred_end(); PI != PE; ++PI) {
821        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
822        VNInfo *PVNI = li->getVNInfoAt(Stop);
823        // A predecessor is not required to have a live-out value for a PHI.
824        if (PVNI) {
825          assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
826          WorkList.push_back(std::make_pair(Stop, PVNI));
827        }
828      }
829      continue;
830    }
831
832    // VNI is live-in to MBB.
833    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
834    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
835
836    // Make sure VNI is live-out from the predecessors.
837    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
838         PE = MBB->pred_end(); PI != PE; ++PI) {
839      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
840      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
841      WorkList.push_back(std::make_pair(Stop, VNI));
842    }
843  }
844
845  // Handle dead values.
846  bool CanSeparate = false;
847  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
848       I != E; ++I) {
849    VNInfo *VNI = *I;
850    if (VNI->isUnused())
851      continue;
852    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
853    assert(LII != NewLI.end() && "Missing live range for PHI");
854    if (LII->end != VNI->def.getNextSlot())
855      continue;
856    if (VNI->isPHIDef()) {
857      // This is a dead PHI. Remove it.
858      VNI->setIsUnused(true);
859      NewLI.removeRange(*LII);
860      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
861      CanSeparate = true;
862    } else {
863      // This is a dead def. Make sure the instruction knows.
864      MachineInstr *MI = getInstructionFromIndex(VNI->def);
865      assert(MI && "No instruction defining live value");
866      MI->addRegisterDead(li->reg, tri_);
867      if (dead && MI->allDefsAreDead()) {
868        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
869        dead->push_back(MI);
870      }
871    }
872  }
873
874  // Move the trimmed ranges back.
875  li->ranges.swap(NewLI.ranges);
876  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
877  return CanSeparate;
878}
879
880
881//===----------------------------------------------------------------------===//
882// Register allocator hooks.
883//
884
885MachineBasicBlock::iterator
886LiveIntervals::getLastSplitPoint(const LiveInterval &li,
887                                 MachineBasicBlock *mbb) const {
888  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
889
890  // If li is not live into a landing pad, we can insert spill code before the
891  // first terminator.
892  if (!lpad || !isLiveInToMBB(li, lpad))
893    return mbb->getFirstTerminator();
894
895  // When there is a landing pad, spill code must go before the call instruction
896  // that can throw.
897  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
898  while (I != B) {
899    --I;
900    if (I->getDesc().isCall())
901      return I;
902  }
903  // The block contains no calls that can throw, so use the first terminator.
904  return mbb->getFirstTerminator();
905}
906
907void LiveIntervals::addKillFlags() {
908  for (iterator I = begin(), E = end(); I != E; ++I) {
909    unsigned Reg = I->first;
910    if (TargetRegisterInfo::isPhysicalRegister(Reg))
911      continue;
912    if (mri_->reg_nodbg_empty(Reg))
913      continue;
914    LiveInterval *LI = I->second;
915
916    // Every instruction that kills Reg corresponds to a live range end point.
917    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
918         ++RI) {
919      // A LOAD index indicates an MBB edge.
920      if (RI->end.isLoad())
921        continue;
922      MachineInstr *MI = getInstructionFromIndex(RI->end);
923      if (!MI)
924        continue;
925      MI->addRegisterKilled(Reg, NULL);
926    }
927  }
928}
929
930/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
931/// allow one) virtual register operand, then its uses are implicitly using
932/// the register. Returns the virtual register.
933unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
934                                            MachineInstr *MI) const {
935  unsigned RegOp = 0;
936  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
937    MachineOperand &MO = MI->getOperand(i);
938    if (!MO.isReg() || !MO.isUse())
939      continue;
940    unsigned Reg = MO.getReg();
941    if (Reg == 0 || Reg == li.reg)
942      continue;
943
944    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
945        !allocatableRegs_[Reg])
946      continue;
947    // FIXME: For now, only remat MI with at most one register operand.
948    assert(!RegOp &&
949           "Can't rematerialize instruction with multiple register operand!");
950    RegOp = MO.getReg();
951#ifndef NDEBUG
952    break;
953#endif
954  }
955  return RegOp;
956}
957
958/// isValNoAvailableAt - Return true if the val# of the specified interval
959/// which reaches the given instruction also reaches the specified use index.
960bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
961                                       SlotIndex UseIdx) const {
962  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
963  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
964}
965
966/// isReMaterializable - Returns true if the definition MI of the specified
967/// val# of the specified interval is re-materializable.
968bool
969LiveIntervals::isReMaterializable(const LiveInterval &li,
970                                  const VNInfo *ValNo, MachineInstr *MI,
971                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
972                                  bool &isLoad) {
973  if (DisableReMat)
974    return false;
975
976  if (!tii_->isTriviallyReMaterializable(MI, aa_))
977    return false;
978
979  // Target-specific code can mark an instruction as being rematerializable
980  // if it has one virtual reg use, though it had better be something like
981  // a PIC base register which is likely to be live everywhere.
982  unsigned ImpUse = getReMatImplicitUse(li, MI);
983  if (ImpUse) {
984    const LiveInterval &ImpLi = getInterval(ImpUse);
985    for (MachineRegisterInfo::use_nodbg_iterator
986           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
987         ri != re; ++ri) {
988      MachineInstr *UseMI = &*ri;
989      SlotIndex UseIdx = getInstructionIndex(UseMI);
990      if (li.getVNInfoAt(UseIdx) != ValNo)
991        continue;
992      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
993        return false;
994    }
995
996    // If a register operand of the re-materialized instruction is going to
997    // be spilled next, then it's not legal to re-materialize this instruction.
998    if (SpillIs)
999      for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
1000        if (ImpUse == (*SpillIs)[i]->reg)
1001          return false;
1002  }
1003  return true;
1004}
1005
1006/// isReMaterializable - Returns true if the definition MI of the specified
1007/// val# of the specified interval is re-materializable.
1008bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1009                                       const VNInfo *ValNo, MachineInstr *MI) {
1010  bool Dummy2;
1011  return isReMaterializable(li, ValNo, MI, 0, Dummy2);
1012}
1013
1014/// isReMaterializable - Returns true if every definition of MI of every
1015/// val# of the specified interval is re-materializable.
1016bool
1017LiveIntervals::isReMaterializable(const LiveInterval &li,
1018                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
1019                                  bool &isLoad) {
1020  isLoad = false;
1021  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1022       i != e; ++i) {
1023    const VNInfo *VNI = *i;
1024    if (VNI->isUnused())
1025      continue; // Dead val#.
1026    // Is the def for the val# rematerializable?
1027    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1028    if (!ReMatDefMI)
1029      return false;
1030    bool DefIsLoad = false;
1031    if (!ReMatDefMI ||
1032        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1033      return false;
1034    isLoad |= DefIsLoad;
1035  }
1036  return true;
1037}
1038
1039/// FilterFoldedOps - Filter out two-address use operands. Return
1040/// true if it finds any issue with the operands that ought to prevent
1041/// folding.
1042static bool FilterFoldedOps(MachineInstr *MI,
1043                            SmallVector<unsigned, 2> &Ops,
1044                            unsigned &MRInfo,
1045                            SmallVector<unsigned, 2> &FoldOps) {
1046  MRInfo = 0;
1047  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1048    unsigned OpIdx = Ops[i];
1049    MachineOperand &MO = MI->getOperand(OpIdx);
1050    // FIXME: fold subreg use.
1051    if (MO.getSubReg())
1052      return true;
1053    if (MO.isDef())
1054      MRInfo |= (unsigned)VirtRegMap::isMod;
1055    else {
1056      // Filter out two-address use operand(s).
1057      if (MI->isRegTiedToDefOperand(OpIdx)) {
1058        MRInfo = VirtRegMap::isModRef;
1059        continue;
1060      }
1061      MRInfo |= (unsigned)VirtRegMap::isRef;
1062    }
1063    FoldOps.push_back(OpIdx);
1064  }
1065  return false;
1066}
1067
1068
1069/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1070/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
1072/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // NOTE(review): InstrIdx is not referenced in this body — presumably kept
  // for interface symmetry with related helpers; confirm before removing.

  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Fold either the stack slot (isSS) or the rematerialized def into MI,
  // producing a replacement instruction fmi on success.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer all VirtRegMap bookkeeping from the old instruction to the
    // new one while MI is still valid, then replace MI in the slot index
    // maps before erasing it.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    // MI is an in/out reference: hand the folded instruction back to the
    // caller.
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
1120
1121/// canFoldMemoryOperand - Returns true if the specified load / store
1122/// folding is possible.
1123bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1124                                         SmallVector<unsigned, 2> &Ops,
1125                                         bool ReMat) const {
1126  // Filter the list of operand indexes that are to be folded. Abort if
1127  // any operand will prevent folding.
1128  unsigned MRInfo = 0;
1129  SmallVector<unsigned, 2> FoldOps;
1130  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1131    return false;
1132
1133  // It's only legal to remat for a use, not a def.
1134  if (ReMat && (MRInfo & VirtRegMap::isMod))
1135    return false;
1136
1137  return tii_->canFoldMemoryOperand(MI, FoldOps);
1138}
1139
1140bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1141  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1142
1143  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
1144
1145  if (mbb == 0)
1146    return false;
1147
1148  for (++itr; itr != li.ranges.end(); ++itr) {
1149    MachineBasicBlock *mbb2 =
1150      indexes_->getMBBCoveringRange(itr->start, itr->end);
1151
1152    if (mbb2 != mbb)
1153      return false;
1154  }
1155
1156  return true;
1157}
1158
1159/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1160/// interval on to-be re-materialized operands of MI) with new register.
1161void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1162                                       MachineInstr *MI, unsigned NewVReg,
1163                                       VirtRegMap &vrm) {
1164  // There is an implicit use. That means one of the other operand is
1165  // being remat'ed and the remat'ed instruction has li.reg as an
1166  // use operand. Make sure we rewrite that as well.
1167  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1168    MachineOperand &MO = MI->getOperand(i);
1169    if (!MO.isReg())
1170      continue;
1171    unsigned Reg = MO.getReg();
1172    if (!TargetRegisterInfo::isVirtualRegister(Reg))
1173      continue;
1174    if (!vrm.isReMaterialized(Reg))
1175      continue;
1176    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1177    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1178    if (UseMO)
1179      UseMO->setReg(NewVReg);
1180  }
1181}
1182
1183/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1184/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  // Rewrites MI's operands that reference li.reg to use NewVReg (creating a
  // fresh vreg when NewVReg is 0), optionally folding memory operands or
  // erasing a fully rematerialized def, and builds the spill/remat interval
  // for NewVReg. Returns whether every rewritten operand could be folded.
  // NOTE(review): 'end' and 'loopInfo' are not referenced in this body —
  // presumably kept for interface symmetry with the caller; confirm.
  bool CanFold = false;
  // Folding can rewrite MI arbitrarily, so we restart the operand scan from
  // the top via this label whenever a fold succeeds.
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // tryFoldMemoryOperand may have erased MI entirely (e.g. an
          // implicit def); in that case there is nothing left to rescan.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: live from the reload (LOAD slot) to the def slot.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
1386bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1387                                   const VNInfo *VNI,
1388                                   MachineBasicBlock *MBB,
1389                                   SlotIndex Idx) const {
1390  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1391}
1392
1393/// RewriteInfo - Keep track of machine instrs that will be rewritten
1394/// during spilling.
1395namespace {
1396  struct RewriteInfo {
1397    SlotIndex Index;
1398    MachineInstr *MI;
1399    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
1400  };
1401
1402  struct RewriteInfoCompare {
1403    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1404      return LHS.Index < RHS.Index;
1405    }
1406  };
1407}
1408
1409void LiveIntervals::
1410rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1411                    LiveInterval::Ranges::const_iterator &I,
1412                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1413                    unsigned Slot, int LdSlot,
1414                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1415                    VirtRegMap &vrm,
1416                    const TargetRegisterClass* rc,
1417                    SmallVector<int, 4> &ReMatIds,
1418                    const MachineLoopInfo *loopInfo,
1419                    BitVector &SpillMBBs,
1420                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1421                    BitVector &RestoreMBBs,
1422                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1423                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1424                    std::vector<LiveInterval*> &NewLIs) {
1425  bool AllCanFold = true;
1426  unsigned NewVReg = 0;
1427  SlotIndex start = I->start.getBaseIndex();
1428  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1429
1430  // First collect all the def / use in this live range that will be rewritten.
1431  // Make sure they are sorted according to instruction index.
1432  std::vector<RewriteInfo> RewriteMIs;
1433  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1434         re = mri_->reg_end(); ri != re; ) {
1435    MachineInstr *MI = &*ri;
1436    MachineOperand &O = ri.getOperand();
1437    ++ri;
1438    if (MI->isDebugValue()) {
1439      // Modify DBG_VALUE now that the value is in a spill slot.
1440      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1441        uint64_t Offset = MI->getOperand(1).getImm();
1442        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1443        DebugLoc DL = MI->getDebugLoc();
1444        int FI = isLoadSS ? LdSlot : (int)Slot;
1445        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1446                                                           Offset, MDPtr, DL)) {
1447          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1448          ReplaceMachineInstrInMaps(MI, NewDV);
1449          MachineBasicBlock *MBB = MI->getParent();
1450          MBB->insert(MBB->erase(MI), NewDV);
1451          continue;
1452        }
1453      }
1454
1455      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1456      RemoveMachineInstrFromMaps(MI);
1457      vrm.RemoveMachineInstrFromMaps(MI);
1458      MI->eraseFromParent();
1459      continue;
1460    }
1461    assert(!(O.isImplicit() && O.isUse()) &&
1462           "Spilling register that's used as implicit use?");
1463    SlotIndex index = getInstructionIndex(MI);
1464    if (index < start || index >= end)
1465      continue;
1466
1467    if (O.isUndef())
1468      // Must be defined by an implicit def. It should not be spilled. Note,
1469      // this is for correctness reason. e.g.
1470      // 8   %reg1024<def> = IMPLICIT_DEF
1471      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1472      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
1474      // interval of r1025. Now suppose both registers are spilled, you can
1475      // easily see a situation where both registers are reloaded before
1476      // the INSERT_SUBREG and both target registers that would overlap.
1477      continue;
1478    RewriteMIs.push_back(RewriteInfo(index, MI));
1479  }
1480  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1481
1482  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1483  // Now rewrite the defs and uses.
1484  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1485    RewriteInfo &rwi = RewriteMIs[i];
1486    ++i;
1487    SlotIndex index = rwi.Index;
1488    MachineInstr *MI = rwi.MI;
1489    // If MI def and/or use the same register multiple times, then there
1490    // are multiple entries.
1491    while (i != e && RewriteMIs[i].MI == MI) {
1492      assert(RewriteMIs[i].Index == index);
1493      ++i;
1494    }
1495    MachineBasicBlock *MBB = MI->getParent();
1496
1497    if (ImpUse && MI != ReMatDefMI) {
1498      // Re-matting an instruction with virtual register use. Prevent interval
1499      // from being spilled.
1500      getInterval(ImpUse).markNotSpillable();
1501    }
1502
1503    unsigned MBBId = MBB->getNumber();
1504    unsigned ThisVReg = 0;
1505    if (TrySplit) {
1506      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1507      if (NVI != MBBVRegsMap.end()) {
1508        ThisVReg = NVI->second;
1509        // One common case:
1510        // x = use
1511        // ...
1512        // ...
1513        // def = ...
1514        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
1517        if (MI->readsWritesVirtualRegister(li.reg) ==
1518            std::make_pair(false,true)) {
1519          MBBVRegsMap.erase(MBB->getNumber());
1520          ThisVReg = 0;
1521        }
1522      }
1523    }
1524
1525    bool IsNew = ThisVReg == 0;
1526    if (IsNew) {
1527      // This ends the previous live interval. If all of its def / use
1528      // can be folded, give it a low spill weight.
1529      if (NewVReg && TrySplit && AllCanFold) {
1530        LiveInterval &nI = getOrCreateInterval(NewVReg);
1531        nI.weight /= 10.0F;
1532      }
1533      AllCanFold = true;
1534    }
1535    NewVReg = ThisVReg;
1536
1537    bool HasDef = false;
1538    bool HasUse = false;
1539    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1540                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1541                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1542                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1543                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1544    if (!HasDef && !HasUse)
1545      continue;
1546
1547    AllCanFold &= CanFold;
1548
1549    // Update weight of spill interval.
1550    LiveInterval &nI = getOrCreateInterval(NewVReg);
1551    if (!TrySplit) {
1552      // The spill weight is now infinity as it cannot be spilled again.
1553      nI.markNotSpillable();
1554      continue;
1555    }
1556
1557    // Keep track of the last def and first use in each MBB.
1558    if (HasDef) {
1559      if (MI != ReMatOrigDefMI || !CanDelete) {
1560        bool HasKill = false;
1561        if (!HasUse)
1562          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1563        else {
1564          // If this is a two-address code, then this index starts a new VNInfo.
1565          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1566          if (VNI)
1567            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1568        }
1569        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1570          SpillIdxes.find(MBBId);
1571        if (!HasKill) {
1572          if (SII == SpillIdxes.end()) {
1573            std::vector<SRInfo> S;
1574            S.push_back(SRInfo(index, NewVReg, true));
1575            SpillIdxes.insert(std::make_pair(MBBId, S));
1576          } else if (SII->second.back().vreg != NewVReg) {
1577            SII->second.push_back(SRInfo(index, NewVReg, true));
1578          } else if (index > SII->second.back().index) {
1579            // If there is an earlier def and this is a two-address
1580            // instruction, then it's not possible to fold the store (which
1581            // would also fold the load).
1582            SRInfo &Info = SII->second.back();
1583            Info.index = index;
1584            Info.canFold = !HasUse;
1585          }
1586          SpillMBBs.set(MBBId);
1587        } else if (SII != SpillIdxes.end() &&
1588                   SII->second.back().vreg == NewVReg &&
1589                   index > SII->second.back().index) {
1590          // There is an earlier def that's not killed (must be two-address).
1591          // The spill is no longer needed.
1592          SII->second.pop_back();
1593          if (SII->second.empty()) {
1594            SpillIdxes.erase(MBBId);
1595            SpillMBBs.reset(MBBId);
1596          }
1597        }
1598      }
1599    }
1600
1601    if (HasUse) {
1602      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1603        SpillIdxes.find(MBBId);
1604      if (SII != SpillIdxes.end() &&
1605          SII->second.back().vreg == NewVReg &&
1606          index > SII->second.back().index)
1607        // Use(s) following the last def, it's not safe to fold the spill.
1608        SII->second.back().canFold = false;
1609      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1610        RestoreIdxes.find(MBBId);
1611      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1612        // If we are splitting live intervals, only fold if it's the first
1613        // use and there isn't another use later in the MBB.
1614        RII->second.back().canFold = false;
1615      else if (IsNew) {
1616        // Only need a reload if there isn't an earlier def / use.
1617        if (RII == RestoreIdxes.end()) {
1618          std::vector<SRInfo> Infos;
1619          Infos.push_back(SRInfo(index, NewVReg, true));
1620          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1621        } else {
1622          RII->second.push_back(SRInfo(index, NewVReg, true));
1623        }
1624        RestoreMBBs.set(MBBId);
1625      }
1626    }
1627
1628    // Update spill weight.
1629    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1630    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1631  }
1632
1633  if (NewVReg && TrySplit && AllCanFold) {
1634    // If all of its def / use can be folded, give it a low spill weight.
1635    LiveInterval &nI = getOrCreateInterval(NewVReg);
1636    nI.weight /= 10.0F;
1637  }
1638}
1639
1640bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1641                        unsigned vr, BitVector &RestoreMBBs,
1642                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1643  if (!RestoreMBBs[Id])
1644    return false;
1645  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1646  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1647    if (Restores[i].index == index &&
1648        Restores[i].vreg == vr &&
1649        Restores[i].canFold)
1650      return true;
1651  return false;
1652}
1653
1654void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1655                        unsigned vr, BitVector &RestoreMBBs,
1656                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1657  if (!RestoreMBBs[Id])
1658    return;
1659  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1660  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1661    if (Restores[i].index == index && Restores[i].vreg)
1662      Restores[i].index = SlotIndex();
1663}
1664
1665/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1666/// spilled and create empty intervals for their uses.
1667void
1668LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1669                                    const TargetRegisterClass* rc,
1670                                    std::vector<LiveInterval*> &NewLIs) {
1671  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1672         re = mri_->reg_end(); ri != re; ) {
1673    MachineOperand &O = ri.getOperand();
1674    MachineInstr *MI = &*ri;
1675    ++ri;
1676    if (MI->isDebugValue()) {
1677      // Remove debug info for now.
1678      O.setReg(0U);
1679      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1680      continue;
1681    }
1682    if (O.isDef()) {
1683      assert(MI->isImplicitDef() &&
1684             "Register def was not rewritten?");
1685      RemoveMachineInstrFromMaps(MI);
1686      vrm.RemoveMachineInstrFromMaps(MI);
1687      MI->eraseFromParent();
1688    } else {
1689      // This must be an use of an implicit_def so it's not part of the live
1690      // interval. Create a new empty live interval for it.
1691      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1692      unsigned NewVReg = mri_->createVirtualRegister(rc);
1693      vrm.grow();
1694      vrm.setIsImplicitlyDefined(NewVReg);
1695      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1696      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1697        MachineOperand &MO = MI->getOperand(i);
1698        if (MO.isReg() && MO.getReg() == li.reg) {
1699          MO.setReg(NewVReg);
1700          MO.setIsUndef();
1701        }
1702      }
1703    }
1704  }
1705}
1706
1707float
1708LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1709  // Limit the loop depth ridiculousness.
1710  if (loopDepth > 200)
1711    loopDepth = 200;
1712
1713  // The loop depth is used to roughly estimate the number of times the
1714  // instruction is executed. Something like 10^d is simple, but will quickly
1715  // overflow a float. This expression behaves like 10^d for small d, but is
1716  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1717  // headroom before overflow.
1718  // By the way, powf() might be unavailable here. For consistency,
1719  // We may take pow(double,double).
1720  float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
1721
1722  return (isDef + isUse) * lc;
1723}
1724
1725static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1726  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1727    NewLIs[i]->weight =
1728      normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
1729}
1730
/// addIntervalsForSpills - Create new intervals for the spilled defs / uses
/// of the given interval. If the interval spans multiple basic blocks it is
/// split, and per-block spill / restore points are computed so that stores
/// and reloads can be folded into the using instructions where possible.
/// Returns the list of non-empty new intervals created for the spill.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> *SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-value-number rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    // The stack slot was already assigned when the interval was first split.
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        // Subsequent ranges: no rematerialization, plain load/store to Slot.
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Only attempt splitting when the interval spans more than one block.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // First pass over value numbers: decide, per val#, whether its def can be
  // rematerialized and whether the original def can be deleted afterwards.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // AddedKill tracks new intervals whose last use is a kill, so the final
  // pass below does not need to re-mark them.
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Emit (or fold) one spill per recorded spill point, per block.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          // Collect all operand indices on MI referencing VReg and check
          // whether the store can be folded into the instruction.
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            // Folding removed the def as well; trim it from the interval.
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Emit (or fold) one restore per recorded restore point, per block.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // An invalid index marks an entry cancelled by eraseRestoreInfo().
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        // Mark the last use of this new interval as a kill, unless it is
        // tied to a def (two-address), in which case the value lives on.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
2045
2046/// hasAllocatableSuperReg - Return true if the specified physical register has
2047/// any super register that's allocatable.
2048bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2049  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2050    if (allocatableRegs_[*AS] && hasInterval(*AS))
2051      return true;
2052  return false;
2053}
2054
2055/// getRepresentativeReg - Find the largest super register of the specified
2056/// physical register.
2057unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2058  // Find the largest super-register that is allocatable.
2059  unsigned BestReg = Reg;
2060  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2061    unsigned SuperReg = *AS;
2062    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2063      BestReg = SuperReg;
2064      break;
2065    }
2066  }
2067  return BestReg;
2068}
2069
2070/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2071/// specified interval that conflicts with the specified physical register.
2072unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2073                                                   unsigned PhysReg) const {
2074  unsigned NumConflicts = 0;
2075  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2076  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2077         E = mri_->reg_end(); I != E; ++I) {
2078    MachineOperand &O = I.getOperand();
2079    MachineInstr *MI = O.getParent();
2080    if (MI->isDebugValue())
2081      continue;
2082    SlotIndex Index = getInstructionIndex(MI);
2083    if (pli.liveAt(Index))
2084      ++NumConflicts;
2085  }
2086  return NumConflicts;
2087}
2088
2089/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2090/// around all defs and uses of the specified interval. Return true if it
2091/// was able to cut its interval.
2092bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2093                                            unsigned PhysReg, VirtRegMap &vrm) {
2094  unsigned SpillReg = getRepresentativeReg(PhysReg);
2095
2096  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
2097               << " represented by " << tri_->getName(SpillReg) << '\n');
2098
2099  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2100    // If there are registers which alias PhysReg, but which are not a
2101    // sub-register of the chosen representative super register. Assert
2102    // since we can't handle it yet.
2103    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2104           tri_->isSuperRegister(*AS, SpillReg));
2105
2106  bool Cut = false;
2107  SmallVector<unsigned, 4> PRegs;
2108  if (hasInterval(SpillReg))
2109    PRegs.push_back(SpillReg);
2110  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
2111    if (hasInterval(*SR))
2112      PRegs.push_back(*SR);
2113
2114  DEBUG({
2115    dbgs() << "Trying to spill:";
2116    for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
2117      dbgs() << ' ' << tri_->getName(PRegs[i]);
2118    dbgs() << '\n';
2119  });
2120
2121  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2122  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2123         E = mri_->reg_end(); I != E; ++I) {
2124    MachineOperand &O = I.getOperand();
2125    MachineInstr *MI = O.getParent();
2126    if (MI->isDebugValue() || SeenMIs.count(MI))
2127      continue;
2128    SeenMIs.insert(MI);
2129    SlotIndex Index = getInstructionIndex(MI);
2130    bool LiveReg = false;
2131    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2132      unsigned PReg = PRegs[i];
2133      LiveInterval &pli = getInterval(PReg);
2134      if (!pli.liveAt(Index))
2135        continue;
2136      LiveReg = true;
2137      SlotIndex StartIdx = Index.getLoadIndex();
2138      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2139      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
2140        std::string msg;
2141        raw_string_ostream Msg(msg);
2142        Msg << "Ran out of registers during register allocation!";
2143        if (MI->isInlineAsm()) {
2144          Msg << "\nPlease check your inline asm statement for invalid "
2145              << "constraints:\n";
2146          MI->print(Msg, tm_);
2147        }
2148        report_fatal_error(Msg.str());
2149      }
2150      pli.removeRange(StartIdx, EndIdx);
2151      LiveReg = true;
2152    }
2153    if (!LiveReg)
2154      continue;
2155    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
2156    vrm.addEmergencySpill(SpillReg, MI);
2157    Cut = true;
2158  }
2159  return Cut;
2160}
2161
2162LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2163                                                  MachineInstr* startInst) {
2164  LiveInterval& Interval = getOrCreateInterval(reg);
2165  VNInfo* VN = Interval.getNextValue(
2166    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2167    startInst, getVNInfoAllocator());
2168  VN->setHasPHIKill(true);
2169  LiveRange LR(
2170     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2171     getMBBEndIdx(startInst->getParent()), VN);
2172  Interval.addRange(LR);
2173
2174  return LR;
2175}
2176
2177