LiveIntervalAnalysis.cpp revision 4b11a70f7a63dc4c051a96a90ed6687eb0595cda
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
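// Illustration (print format approximate): a live interval is a set of
// half-open [start, end) slot ranges, each carrying a value number, e.g.
//   %reg1024 = [32,48:0)[80,96:1)  0@32 1@80
// meaning reg1024 holds value #0 (defined at index 32) throughout [32,48)
// and value #1 (defined at index 80) throughout [80,96).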

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  mf_->print(OS, indexes_);
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

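/// conflictsWithPhysReg - Return true if assigning the given physical
/// register to this interval could conflict with another use of that
/// register (or one overlapping it) within the interval's single range.
/// Copies to and from li.reg itself are allowed.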
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
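  // Each instruction number has several sub-slots (load, use, def, store),
  // so the two lines above round the range outward to whole instruction
  // indices: down to the base index of the first instruction, and up past
  // the base index of the last instruction the range touches.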

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

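/// conflictsWithAliasRef - Return true if any instruction within the live
/// interval has an explicit physical register operand that overlaps Reg,
/// ignoring instructions already collected in JoinedCopies.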
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

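/// MultipleDefsBySameMI - Return true if the instruction defines the same
/// virtual register again in an operand past MOIdx, e.g. after REG_SEQUENCE
/// lowering leaves several sub-register defs on one instruction:
///   %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...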
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index
/// is partially re-defining the specified live interval. A common case of
/// this is a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [getMBBStartIdx(block), getMBBEndIdx(block)) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, false,
                                                 tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only cases where we should reach here with a dead physreg and no
  // killing instruction are when it is live-in to the function and never
  // used, or when its implicit use has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  VNInfo *ValNo = interval.getVNInfoAt(start);
  bool Extend = ValNo != 0;
  if (!Extend)
    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (Extend && MO.isEarlyClobber())
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

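/// handleRegisterDef - Dispatch a register def to the virtual or physical
/// register handler. A def of an allocatable physical register also defines
/// each of its sub-registers.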
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));

  // Look for kills. If the register reaches a def before it's killed, it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual registers. For
/// some ordering of the machine instructions [1,N], a live interval is an
/// interval [i, j) where 1 <= i <= j < N for which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
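///
/// In outline: seed a fresh interval with a minimal [def, def+1) range for
/// each value number, then extend those ranges backwards from every
/// remaining use, walking CFG predecessors whenever a use is reached by a
/// live-in value. Values whose ranges never grow past the def slot are dead;
/// PHI defs are marked unused and other defs get a <dead> flag on their
/// defining instruction.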
void LiveIntervals::shrinkToUses(LiveInterval *li) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
    VNInfo *VNI = li->getVNInfoAt(Idx);
    assert(VNI && "Live interval not live into reading instruction");
    if (VNI->def == Idx) {
      // Special case: An early-clobber tied operand reads and writes the
      // register one slot early.
      Idx = Idx.getPrevSlot();
      VNI = li->getVNInfoAt(Idx);
      assert(VNI && "Early-clobber tied value not available");
    }
    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
        VNInfo *PVNI = li->getVNInfoAt(Stop);
        // A predecessor is not required to have a live-out value for a PHI.
        if (PVNI) {
          assert(PVNI->hasPHIKill() && "Missing hasPHIKill flag");
          WorkList.push_back(std::make_pair(Stop, PVNI));
        }
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
      assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getNextSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->setIsUnused(true);
      NewLI.removeRange(*LII);
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, tri_);
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
}


//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

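/// getLastSplitPoint - Return the last point in mbb where spill code for li
/// may safely be inserted: normally the first terminator, but no later than
/// the last call that may throw when li is live into a landing pad successor.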
MachineBasicBlock::iterator
LiveIntervals::getLastSplitPoint(const LiveInterval &li,
                                 MachineBasicBlock *mbb) const {
  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();

  // If li is not live into a landing pad, we can insert spill code before the
  // first terminator.
  if (!lpad || !isLiveInToMBB(li, lpad))
    return mbb->getFirstTerminator();

  // When there is a landing pad, spill code must go before the call
  // instruction that can throw.
  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
  while (I != B) {
    --I;
    if (I->getDesc().isCall())
      return I;
  }
  // The block contains no calls that can throw, so use the first terminator.
  return mbb->getFirstTerminator();
}

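/// addKillFlags - Add <kill> flags to the instruction operands that end each
/// live range of a virtual register. Range ends at block boundaries carry no
/// kill flag.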
void LiveIntervals::addKillFlags() {
  for (iterator I = begin(), E = end(); I != E; ++I) {
    unsigned Reg = I->first;
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (mri_->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = I->second;

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A LOAD index indicates an MBB edge.
      if (RI->end.isLoad())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;
      MI->addRegisterKilled(Reg, NULL);
    }
  }
}

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const VNInfo *ValNo, MachineInstr *MI,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.getVNInfoAt(UseIdx) != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
                                  const SmallVectorImpl<LiveInterval*> &SpillIs,
                                  bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;
    bool DefIsLoad = false;
    if (!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways; rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    bool HasEarlyClobber = false;
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
      if (mopj.isEarlyClobber())
        HasEarlyClobber = true;
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // An early clobber starts at the use slot, except for an early clobber
      // tied to a use operand (yes, that is a thing).
      LiveRange LR(HasEarlyClobber && !HasUse ?
                   index.getUseIndex() : index.getDefIndex(),
                   index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
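
/// anyKillInMBBAfterIdx - Return true if the given live interval is killed
/// in MBB anywhere after the given index.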
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB,
                                   SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

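/// rewriteInstructionsForSpills - Collect every def / use of li.reg within
/// the given range (rewriting or removing DBG_VALUEs along the way), sort
/// them by instruction index, and hand each instruction to
/// rewriteInstructionForSpills, recording per-MBB spill and restore points.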
1385void LiveIntervals::
1386rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1387                    LiveInterval::Ranges::const_iterator &I,
1388                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1389                    unsigned Slot, int LdSlot,
1390                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1391                    VirtRegMap &vrm,
1392                    const TargetRegisterClass* rc,
1393                    SmallVector<int, 4> &ReMatIds,
1394                    const MachineLoopInfo *loopInfo,
1395                    BitVector &SpillMBBs,
1396                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1397                    BitVector &RestoreMBBs,
1398                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1399                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1400                    std::vector<LiveInterval*> &NewLIs) {
1401  bool AllCanFold = true;
1402  unsigned NewVReg = 0;
1403  SlotIndex start = I->start.getBaseIndex();
1404  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: it would
      // be easy to end up with both registers reloaded before the
      // INSERT_SUBREG into target registers that overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
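
  // The register use list walked above is in no particular order, but the
  // per-MBB bookkeeping below (last-def / first-use tracking for spill and
  // restore points) assumes instructions are visited in ascending index
  // order, hence the sort.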
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent the
      // interval from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
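
// A rough sketch of the splitting behavior above (illustrative only): with
// TrySplit set, each basic block touching the original interval gets its own
// new vreg, recorded in MBBVRegsMap. An interval live through BB0 and BB2 is
// thus rewritten as one vreg per block, with a spill recorded after the last
// def in BB0 (SpillIdxes/SpillMBBs) and a restore before the first use in
// BB2 (RestoreIdxes/RestoreMBBs). Intervals whose defs / uses can all be
// folded into memory operands keep a reduced weight (the /= 10.0F above) so
// the allocator prefers to evict them.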

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
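
// Erased restores are marked with a default-constructed (invalid) SlotIndex
// rather than removed from the vector, so the indices of surviving entries
// stay stable; the restore-emission loop in addIntervalsForSpills skips any
// entry whose index equals SlotIndex().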

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}
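
// Sample values of the factor lc = (1 + 100/(d+10))^d computed from the
// expression above:
//
//   d = 0  ->  1        d = 2  ->  ~87
//   d = 1  ->  ~10.1    d = 3  ->  ~657
//
// i.e. roughly one order of magnitude per loop level for shallow nests,
// while the d=200 clamp caps the factor at ~6.7e33, comfortably below the
// maximum float of ~3.4e38.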

static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    NewLIs[i]->weight =
      normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
}

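// normalizeSpillWeight (declared in CalcSpillWeights.h) divides the
// accumulated use/def frequency by a measure of the interval's size, so a
// long interval with few uses comes out cheaper to spill than a short,
// heavily used one.

/// addIntervalsForSpills - Create new live intervals for the spilled (and,
/// where possible, split or rematerialized) values of li. In outline: decide
/// which val#s can be rematerialized, assign a stack slot if one is still
/// needed, rewrite every def / use (folding memory operands where legal),
/// then record the spill / restore points with the VirtRegMap.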
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified, so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses; do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
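
// For example (target-dependent; x86 used for illustration only): starting
// from AX, the walk would consider its super-registers and return the widest
// one that has a live interval but no allocatable super-register of its own,
// so the entire alias tree can be treated as a unit when spilling.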

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
               << " represented by " << tri_->getName(SpillReg) << '\n');

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // Assert if there is a register which aliases PhysReg but is not a
    // sub-register of the chosen representative super-register; we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
    if (hasInterval(*SR))
      PRegs.push_back(*SR);

  DEBUG({
    dbgs() << "Trying to spill:";
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
      dbgs() << ' ' << tri_->getName(PRegs[i]);
    dbgs() << '\n';
  });

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      LiveReg = true;
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      pli.removeRange(StartIdx, EndIdx);
    }
    if (!LiveReg)
      continue;
    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
    vrm.addEmergencySpill(SpillReg, MI);
    Cut = true;
  }
  return Cut;
}
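
// Each hole carved out above spans a single instruction's slots,
// [Index.load, nextIndex.base), freeing the physical register exactly around
// the conflicting def / use; the spiller later materializes the actual
// emergency save / reload at the recorded instructions. A true return value
// tells the caller that at least one such hole was punched in the physreg's
// interval.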

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
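
// The value created above is defined at startInst's def slot, extends to the
// end of the block, and carries the PHI-kill flag, telling later analyses
// that it is consumed by a phi join rather than by an ordinary kill. This is
// intended for callers that insert a copy or other def whose value must
// remain live until the end of the block.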