LiveIntervalAnalysis.cpp revision 94b6e1418202e9f6cf54856d5ec2891c9012777a
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "regalloc"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "llvm/Value.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineInstr.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/Passes.h"
26#include "llvm/Target/TargetRegisterInfo.h"
27#include "llvm/Target/TargetInstrInfo.h"
28#include "llvm/Target/TargetMachine.h"
29#include "llvm/Support/CommandLine.h"
30#include "llvm/Support/Debug.h"
31#include "llvm/Support/ErrorHandling.h"
32#include "llvm/Support/raw_ostream.h"
33#include "llvm/ADT/DenseSet.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/ADT/STLExtras.h"
36#include <algorithm>
37#include <limits>
38#include <cmath>
39using namespace llvm;
40
41// Hidden options to help debugging.
42static cl::opt<bool> DisableReMat("disable-rematerialization",
43                                  cl::init(false), cl::Hidden);
44
45STATISTIC(numIntervals, "Number of original intervals");
46
47char LiveIntervals::ID = 0;
48INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
49                "Live Interval Analysis", false, false)
50INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
51INITIALIZE_PASS_DEPENDENCY(LiveVariables)
52INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
53INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
54INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
55                "Live Interval Analysis", false, false)
56
57void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
58  AU.setPreservesCFG();
59  AU.addRequired<AliasAnalysis>();
60  AU.addPreserved<AliasAnalysis>();
61  AU.addRequired<LiveVariables>();
62  AU.addPreserved<LiveVariables>();
63  AU.addPreservedID(MachineLoopInfoID);
64  AU.addPreservedID(MachineDominatorsID);
65  AU.addPreserved<SlotIndexes>();
66  AU.addRequiredTransitive<SlotIndexes>();
67  MachineFunctionPass::getAnalysisUsage(AU);
68}
69
70void LiveIntervals::releaseMemory() {
71  // Free the live intervals themselves.
72  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
73       E = r2iMap_.end(); I != E; ++I)
74    delete I->second;
75
76  r2iMap_.clear();
77  RegMaskSlots.clear();
78  RegMaskBits.clear();
79  RegMaskBlocks.clear();
80
81  // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
82  VNInfoAllocator.Reset();
83}
84
85/// runOnMachineFunction - Compute live intervals for the whole function.
86///
87bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
88  mf_ = &fn;
89  mri_ = &mf_->getRegInfo();
90  tm_ = &fn.getTarget();
91  tri_ = tm_->getRegisterInfo();
92  tii_ = tm_->getInstrInfo();
93  aa_ = &getAnalysis<AliasAnalysis>();
94  lv_ = &getAnalysis<LiveVariables>();
95  indexes_ = &getAnalysis<SlotIndexes>();
96  allocatableRegs_ = tri_->getAllocatableSet(fn);
97  reservedRegs_ = tri_->getReservedRegs(fn);
98
99  computeIntervals();
100
101  numIntervals += getNumIntervals();
102
103  DEBUG(dump());
104  return true;
105}
106
107/// print - Implement the dump method.
108void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
109  OS << "********** INTERVALS **********\n";
110  for (const_iterator I = begin(), E = end(); I != E; ++I) {
111    I->second->print(OS, tri_);
112    OS << "\n";
113  }
114
115  printInstrs(OS);
116}
117
118void LiveIntervals::printInstrs(raw_ostream &OS) const {
119  OS << "********** MACHINEINSTRS **********\n";
120  mf_->print(OS, indexes_);
121}
122
123void LiveIntervals::dumpInstrs() const {
124  printInstrs(dbgs());
125}
126
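/// MultipleDefsBySameMI - Return true if the register defined by operand MOIdx
/// is defined again by a later operand of the same instruction (typically
/// another sub-register def of the same virtual register).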
127static
128bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
129  unsigned Reg = MI.getOperand(MOIdx).getReg();
130  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
131    const MachineOperand &MO = MI.getOperand(i);
132    if (!MO.isReg())
133      continue;
134    if (MO.getReg() == Reg && MO.isDef()) {
135      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
136             MI.getOperand(MOIdx).getSubReg() &&
137             (MO.getSubReg() || MO.isImplicit()));
138      return true;
139    }
140  }
141  return false;
142}
143
144/// isPartialRedef - Return true if the specified def at the specified index
145/// partially re-defines the specified live interval. A common case of this is
146/// a definition of a sub-register.
147bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
148                                   LiveInterval &interval) {
149  if (!MO.getSubReg() || MO.isEarlyClobber())
150    return false;
151
152  SlotIndex RedefIndex = MIIdx.getRegSlot();
153  const LiveRange *OldLR =
154    interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
155  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
156  if (DefMI != 0) {
157    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
158  }
159  return false;
160}
161
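/// handleVirtualRegisterDef - Update the live interval of the virtual register
/// defined by operand MOIdx of the instruction at index MIIdx.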
162void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
163                                             MachineBasicBlock::iterator mi,
164                                             SlotIndex MIIdx,
165                                             MachineOperand& MO,
166                                             unsigned MOIdx,
167                                             LiveInterval &interval) {
168  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
169
170  // Virtual registers may be defined multiple times (due to phi
171  // elimination and 2-addr elimination).  Much of what we do only has to be
172  // done once for the vreg.  We use an empty interval to detect the first
173  // time we see a vreg.
174  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
175  if (interval.empty()) {
176    // Get the Idx of the defining instructions.
177    SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
178
179    // Make sure the first definition is not a partial redefinition. Add an
180    // <imp-def> of the full register.
181    // FIXME: LiveIntervals shouldn't modify the code like this.  Whoever
182    // created the machine instruction should annotate it with <undef> flags
183    // as needed.  Then we can simply assert here.  The REG_SEQUENCE lowering
184    // is the main suspect.
185    if (MO.getSubReg()) {
186      mi->addRegisterDefined(interval.reg);
187      // Mark all defs of interval.reg on this instruction as reading <undef>.
188      for (unsigned i = MOIdx, e = mi->getNumOperands(); i != e; ++i) {
189        MachineOperand &MO2 = mi->getOperand(i);
190        if (MO2.isReg() && MO2.getReg() == interval.reg && MO2.getSubReg())
191          MO2.setIsUndef();
192      }
193    }
194
195    VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
196    assert(ValNo->id == 0 && "First value in interval is not 0?");
197
198    // Loop over all of the blocks that the vreg is defined in.  There are
199    // two cases we have to handle here.  The most common case is a vreg
200    // whose lifetime is contained within a basic block.  In this case there
201    // will be a single kill, in MBB, which comes after the definition.
202    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
203      // FIXME: what about dead vars?
204      SlotIndex killIdx;
205      if (vi.Kills[0] != mi)
206        killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
207      else
208        killIdx = defIndex.getDeadSlot();
209
210      // If the kill happens after the definition, we have an intra-block
211      // live range.
212      if (killIdx > defIndex) {
213        assert(vi.AliveBlocks.empty() &&
214               "Shouldn't be alive across any blocks!");
215        LiveRange LR(defIndex, killIdx, ValNo);
216        interval.addRange(LR);
217        DEBUG(dbgs() << " +" << LR << "\n");
218        return;
219      }
220    }
221
222    // The other case we handle is when a virtual register lives to the end
223    // of the defining block, potentially live across some blocks, then is
224    // live into some number of blocks, but gets killed.  Start by adding a
225    // range that goes from this definition to the end of the defining block.
226    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
227    DEBUG(dbgs() << " +" << NewLR);
228    interval.addRange(NewLR);
229
230    bool PHIJoin = lv_->isPHIJoin(interval.reg);
231
232    if (PHIJoin) {
233      // A phi join register is killed at the end of the MBB and revived as a new
234      // valno in the killing blocks.
235      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
236      DEBUG(dbgs() << " phi-join");
237      ValNo->setHasPHIKill(true);
238    } else {
239      // Iterate over all of the blocks that the variable is completely
240      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
241      // live interval.
242      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
243               E = vi.AliveBlocks.end(); I != E; ++I) {
244        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
245        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
246        interval.addRange(LR);
247        DEBUG(dbgs() << " +" << LR);
248      }
249    }
250
251    // Finally, this virtual register is live from the start of any killing
252    // block to the 'use' slot of the killing instruction.
253    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
254      MachineInstr *Kill = vi.Kills[i];
255      SlotIndex Start = getMBBStartIdx(Kill->getParent());
256      SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();
257
258      // Create a range with a NEW value number.  Note that this value
259      // number isn't actually defined by an instruction, weird huh? :)
260      if (PHIJoin) {
261        assert(getInstructionFromIndex(Start) == 0 &&
262               "PHI def index points at actual instruction.");
263        ValNo = interval.getNextValue(Start, VNInfoAllocator);
264        ValNo->setIsPHIDef(true);
265      }
266      LiveRange LR(Start, killIdx, ValNo);
267      interval.addRange(LR);
268      DEBUG(dbgs() << " +" << LR);
269    }
270
271  } else {
272    if (MultipleDefsBySameMI(*mi, MOIdx))
273      // Multiple defs of the same virtual register by the same instruction.
274      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
275      // This is likely due to elimination of REG_SEQUENCE instructions. Return
276      // here since there is nothing to do.
277      return;
278
279    // If this is the second time we see a virtual register definition, it
280    // must be due to phi elimination or two addr elimination.  If this is
281    // the result of two address elimination, then the vreg is one of the
282    // def-and-use register operands.
283
284    // It may also be partial redef like this:
285    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
286    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
287    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
288    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
289      // If this is a two-address definition, then we have already processed
290      // the live range.  The only problem is that we didn't realize there
291      // are actually two values in the live interval.  Because of this we
292      // need to take the LiveRange that defines this register and split it
293      // into two values.
294      SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
295
296      const LiveRange *OldLR =
297        interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
298      VNInfo *OldValNo = OldLR->valno;
299      SlotIndex DefIndex = OldValNo->def.getRegSlot();
300
301      // Delete the previous value, which should be short and continuous,
302      // because the 2-addr copy must be in the same MBB as the redef.
303      interval.removeRange(DefIndex, RedefIndex);
304
305      // The new value number (#1) is defined by the instruction we claimed
306      // defined value #0.
307      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
308
309      // Value#0 is now defined by the 2-addr instruction.
310      OldValNo->def = RedefIndex;
311
312      // Add the new live interval which replaces the range for the input copy.
313      LiveRange LR(DefIndex, RedefIndex, ValNo);
314      DEBUG(dbgs() << " replace range with " << LR);
315      interval.addRange(LR);
316
317      // If this redefinition is dead, we need to add a dummy unit live
318      // range covering the def slot.
319      if (MO.isDead())
320        interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
321                                    OldValNo));
322
323      DEBUG({
324          dbgs() << " RESULT: ";
325          interval.print(dbgs(), tri_);
326        });
327    } else if (lv_->isPHIJoin(interval.reg)) {
328      // In the case of PHI elimination, each variable definition is only
329      // live until the end of the block.  We've already taken care of the
330      // rest of the live range.
331
332      SlotIndex defIndex = MIIdx.getRegSlot();
333      if (MO.isEarlyClobber())
334        defIndex = MIIdx.getRegSlot(true);
335
336      VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
337
338      SlotIndex killIndex = getMBBEndIdx(mbb);
339      LiveRange LR(defIndex, killIndex, ValNo);
340      interval.addRange(LR);
341      ValNo->setHasPHIKill(true);
342      DEBUG(dbgs() << " phi-join +" << LR);
343    } else {
344      llvm_unreachable("Multiply defined register");
345    }
346  }
347
348  DEBUG(dbgs() << '\n');
349}
350
351#ifndef NDEBUG
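// isRegLiveOutOf - Return true if Reg is listed as a live-in of any successor
// of MBB.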
352static bool isRegLiveOutOf(const MachineBasicBlock *MBB, unsigned Reg) {
353  for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
354                                              SE = MBB->succ_end();
355       SI != SE; ++SI) {
356    const MachineBasicBlock* succ = *SI;
357    if (succ->isLiveIn(Reg))
358      return true;
359  }
360  return false;
361}
362#endif
363
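/// handlePhysicalRegisterDef - Compute the live range created by a physical
/// register def, which ends at the first kill, redef, or block boundary, or
/// collapses to a dead slot if the value is never read.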
364void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
365                                              MachineBasicBlock::iterator mi,
366                                              SlotIndex MIIdx,
367                                              MachineOperand& MO,
368                                              LiveInterval &interval) {
369  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
370
371  SlotIndex baseIndex = MIIdx;
372  SlotIndex start = baseIndex.getRegSlot(MO.isEarlyClobber());
373  SlotIndex end = start;
374
375  // If it is not used after definition, it is considered dead at
376  // the instruction defining it. Hence its interval is:
377  // [defSlot(def), defSlot(def)+1)
378  // For earlyclobbers, the defSlot was pushed back one; the extra
379  // advance below compensates.
380  if (MO.isDead()) {
381    DEBUG(dbgs() << " dead");
382    end = start.getDeadSlot();
383    goto exit;
384  }
385
386  // If it is not dead on definition, it must be killed by a
387  // subsequent instruction. Hence its interval is:
388  // [defSlot(def), useSlot(kill)+1)
389  baseIndex = baseIndex.getNextIndex();
390  while (++mi != MBB->end()) {
391
392    if (mi->isDebugValue())
393      continue;
394    if (getInstructionFromIndex(baseIndex) == 0)
395      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
396
397    if (mi->killsRegister(interval.reg, tri_)) {
398      DEBUG(dbgs() << " killed");
399      end = baseIndex.getRegSlot();
400      goto exit;
401    } else {
402      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
403      if (DefIdx != -1) {
404        if (mi->isRegTiedToUseOperand(DefIdx)) {
405          // Two-address instruction.
406          end = baseIndex.getRegSlot(mi->getOperand(DefIdx).isEarlyClobber());
407        } else {
408          // Another instruction redefines the register before it is ever read.
409          // Then the register is essentially dead at the instruction that
410          // defines it. Hence its interval is:
411          // [defSlot(def), defSlot(def)+1)
412          DEBUG(dbgs() << " dead");
413          end = start.getDeadSlot();
414        }
415        goto exit;
416      }
417    }
418
419    baseIndex = baseIndex.getNextIndex();
420  }
421
422  // If we get here the register *should* be live out.
423  assert(!isAllocatable(interval.reg) && "Physregs shouldn't be live out!");
424
425  // FIXME: We need saner rules for reserved regs.
426  if (isReserved(interval.reg)) {
427    assert(!isRegLiveOutOf(MBB, interval.reg) && "Reserved reg live-out?");
428    end = start.getDeadSlot();
429  } else {
430    // Unreserved, unallocatable registers like EFLAGS can be live across basic
431    // block boundaries.
432    assert(isRegLiveOutOf(MBB, interval.reg) && "Unreserved reg not live-out?");
433    end = getMBBEndIdx(MBB);
434  }
435exit:
436  assert(start < end && "did not find end of interval?");
437
438  // Already exists? Extend old live interval.
439  VNInfo *ValNo = interval.getVNInfoAt(start);
440  bool Extend = ValNo != 0;
441  if (!Extend)
442    ValNo = interval.getNextValue(start, VNInfoAllocator);
443  LiveRange LR(start, end, ValNo);
444  interval.addRange(LR);
445  DEBUG(dbgs() << " +" << LR << '\n');
446}
447
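/// handleRegisterDef - Dispatch a register def to the virtual or physical
/// register handler, creating the interval if it doesn't exist yet.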
448void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
449                                      MachineBasicBlock::iterator MI,
450                                      SlotIndex MIIdx,
451                                      MachineOperand& MO,
452                                      unsigned MOIdx) {
453  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
454    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
455                             getOrCreateInterval(MO.getReg()));
456  else
457    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
458                              getOrCreateInterval(MO.getReg()));
459}
460
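/// handleLiveInRegister - Create a live range for a physical register that is
/// live-in to MBB, running from the block start to its first kill or def;
/// untouched live-ins are either live through the block or dead on entry.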
461void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
462                                         SlotIndex MIIdx,
463                                         LiveInterval &interval) {
464  assert(TargetRegisterInfo::isPhysicalRegister(interval.reg) &&
465         "Only physical registers can be live in.");
466  assert((!isAllocatable(interval.reg) || MBB == &MBB->getParent()->front() ||
467          MBB->isLandingPad()) &&
468          "Allocatable live-ins only valid for entry blocks and landing pads.");
469
470  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
471
472  // Look for kills; if the register reaches a def before it is killed, then it
473  // shouldn't be considered a live-in.
474  MachineBasicBlock::iterator mi = MBB->begin();
475  MachineBasicBlock::iterator E = MBB->end();
476  // Skip over DBG_VALUE at the start of the MBB.
477  if (mi != E && mi->isDebugValue()) {
478    while (++mi != E && mi->isDebugValue())
479      ;
480    if (mi == E)
481      // MBB is empty except for DBG_VALUE's.
482      return;
483  }
484
485  SlotIndex baseIndex = MIIdx;
486  SlotIndex start = baseIndex;
487  if (getInstructionFromIndex(baseIndex) == 0)
488    baseIndex = indexes_->getNextNonNullIndex(baseIndex);
489
490  SlotIndex end = baseIndex;
491  bool SeenDefUse = false;
492
493  while (mi != E) {
494    if (mi->killsRegister(interval.reg, tri_)) {
495      DEBUG(dbgs() << " killed");
496      end = baseIndex.getRegSlot();
497      SeenDefUse = true;
498      break;
499    } else if (mi->definesRegister(interval.reg, tri_)) {
500      // Another instruction redefines the register before it is ever read.
501      // Then the register is essentially dead at the instruction that defines
502      // it. Hence its interval is:
503      // [defSlot(def), defSlot(def)+1)
504      DEBUG(dbgs() << " dead");
505      end = start.getDeadSlot();
506      SeenDefUse = true;
507      break;
508    }
509
510    while (++mi != E && mi->isDebugValue())
511      // Skip over DBG_VALUE.
512      ;
513    if (mi != E)
514      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
515  }
516
517  // Live-in register might not be used at all.
518  if (!SeenDefUse) {
519    if (isAllocatable(interval.reg) || isReserved(interval.reg)) {
520      // This must be an entry block or landing pad - we asserted so on entry
521      // to the function. For these blocks the interval is dead on entry.
522      DEBUG(dbgs() << " dead");
523      end = start.getDeadSlot();
524    } else {
525      assert(isRegLiveOutOf(MBB, interval.reg) &&
526             "Live in reg untouched in block should be live through.");
527      DEBUG(dbgs() << " live through");
528      end = getMBBEndIdx(MBB);
529    }
530  }
531
532  SlotIndex defIdx = getMBBStartIdx(MBB);
533  assert(getInstructionFromIndex(defIdx) == 0 &&
534         "PHI def index points at actual instruction.");
535  VNInfo *vni = interval.getNextValue(defIdx, VNInfoAllocator);
536  vni->setIsPHIDef(true);
537  LiveRange LR(start, end, vni);
538
539  interval.addRange(LR);
540  DEBUG(dbgs() << " +" << LR << '\n');
541}
542
543/// computeIntervals - Compute the live intervals for virtual and physical
544/// registers. For some ordering of the machine instructions [1,N], a live
545/// interval is an interval [i, j), where 1 <= i <= j < N, over which a
546/// variable is live.
547void LiveIntervals::computeIntervals() {
548  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
549               << "********** Function: "
550               << ((Value*)mf_->getFunction())->getName() << '\n');
551
552  RegMaskBlocks.resize(mf_->getNumBlockIDs());
553
554  SmallVector<unsigned, 8> UndefUses;
555  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
556       MBBI != E; ++MBBI) {
557    MachineBasicBlock *MBB = MBBI;
558    RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();
559
560    if (MBB->empty())
561      continue;
562
563    // Track the index of the current machine instr.
564    SlotIndex MIIndex = getMBBStartIdx(MBB);
565    DEBUG(dbgs() << "BB#" << MBB->getNumber()
566          << ":\t\t# derived from " << MBB->getName() << "\n");
567
568    // Create intervals for live-ins to this BB first.
569    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
570           LE = MBB->livein_end(); LI != LE; ++LI) {
571      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
572    }
573
574    // Skip over empty initial indices.
575    if (getInstructionFromIndex(MIIndex) == 0)
576      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
577
578    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
579         MI != miEnd; ++MI) {
580      DEBUG(dbgs() << MIIndex << "\t" << *MI);
581      if (MI->isDebugValue())
582        continue;
583      assert(indexes_->getInstructionFromIndex(MIIndex) == MI &&
584             "Lost SlotIndex synchronization");
585
586      // Handle defs.
587      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
588        MachineOperand &MO = MI->getOperand(i);
589
590        // Collect register masks.
591        if (MO.isRegMask()) {
592          RegMaskSlots.push_back(MIIndex.getRegSlot());
593          RegMaskBits.push_back(MO.getRegMask());
594          continue;
595        }
596
597        if (!MO.isReg() || !MO.getReg())
598          continue;
599
600        // handle register defs - build intervals
601        if (MO.isDef())
602          handleRegisterDef(MBB, MI, MIIndex, MO, i);
603        else if (MO.isUndef())
604          UndefUses.push_back(MO.getReg());
605      }
606
607      // Move to the next instr slot.
608      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
609    }
610
611    // Compute the number of register mask instructions in this block.
612    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
613    RMB.second = RegMaskSlots.size() - RMB.first;
614  }
615
616  // Create empty intervals for registers defined by implicit_def's (except
617  // for those implicit_defs that define values which are live out of their
618  // blocks).
619  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
620    unsigned UndefReg = UndefUses[i];
621    (void)getOrCreateInterval(UndefReg);
622  }
623}
624
625LiveInterval* LiveIntervals::createInterval(unsigned reg) {
626  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
627  return new LiveInterval(reg, Weight);
628}
629
630/// dupInterval - Duplicate a live interval. The caller is responsible for
631/// managing the allocated memory.
632LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
633  LiveInterval *NewLI = createInterval(li->reg);
634  NewLI->Copy(*li, mri_, getVNInfoAllocator());
635  return NewLI;
636}
637
638/// shrinkToUses - After removing some uses of a register, shrink its live
639/// range to just the remaining uses. This method does not compute reaching
640/// defs for new uses, and it doesn't remove dead defs.
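///
/// A minimal usage sketch (the names LIS, Reg and Dead are illustrative): after
/// erasing some uses of Reg, the caller can do
///   SmallVector<MachineInstr*, 8> Dead;
///   bool MaySplit = LIS.shrinkToUses(&LIS.getInterval(Reg), &Dead);
/// Dead then lists the instructions whose defs all became dead, and MaySplit
/// indicates that the shrunken interval may now separate into disconnected
/// components.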
641bool LiveIntervals::shrinkToUses(LiveInterval *li,
642                                 SmallVectorImpl<MachineInstr*> *dead) {
643  DEBUG(dbgs() << "Shrink: " << *li << '\n');
644  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
645         && "Can only shrink virtual registers");
646  // Find all the values used, including PHI kills.
647  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
648
649  // Blocks that have already been added to WorkList as live-out.
650  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
651
652  // Visit all instructions reading li->reg.
653  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg);
654       MachineInstr *UseMI = I.skipInstruction();) {
655    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
656      continue;
657    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
658    // Note: This intentionally picks up the wrong VNI in case of an EC redef.
659    // See below.
660    VNInfo *VNI = li->getVNInfoBefore(Idx);
661    if (!VNI) {
662      // This shouldn't happen: readsVirtualRegister returns true, but there is
663      // no live value. It is likely caused by a target getting <undef> flags
664      // wrong.
665      DEBUG(dbgs() << Idx << '\t' << *UseMI
666                   << "Warning: Instr claims to read non-existent value in "
667                    << *li << '\n');
668      continue;
669    }
670    // Special case: An early-clobber tied operand reads and writes the
671    // register one slot early.  The getVNInfoBefore call above would have
672    // picked up the value defined by UseMI.  Adjust the kill slot and value.
673    if (SlotIndex::isSameInstr(VNI->def, Idx)) {
674      Idx = VNI->def;
675      VNI = li->getVNInfoBefore(Idx);
676      assert(VNI && "Early-clobber tied value not available");
677    }
678    WorkList.push_back(std::make_pair(Idx, VNI));
679  }
680
681  // Create a new live interval with only minimal live segments per def.
682  LiveInterval NewLI(li->reg, 0);
683  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
684       I != E; ++I) {
685    VNInfo *VNI = *I;
686    if (VNI->isUnused())
687      continue;
688    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
689  }
690
691  // Keep track of the PHIs that are in use.
692  SmallPtrSet<VNInfo*, 8> UsedPHIs;
693
694  // Extend intervals to reach all uses in WorkList.
695  while (!WorkList.empty()) {
696    SlotIndex Idx = WorkList.back().first;
697    VNInfo *VNI = WorkList.back().second;
698    WorkList.pop_back();
699    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
700    SlotIndex BlockStart = getMBBStartIdx(MBB);
701
702    // Extend the live range for VNI to be live at Idx.
703    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
704      (void)ExtVNI;
705      assert(ExtVNI == VNI && "Unexpected existing value number");
706      // Is this a PHIDef we haven't seen before?
707      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
708        continue;
709      // The PHI is live, make sure the predecessors are live-out.
710      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
711           PE = MBB->pred_end(); PI != PE; ++PI) {
712        if (!LiveOut.insert(*PI))
713          continue;
714        SlotIndex Stop = getMBBEndIdx(*PI);
715        // A predecessor is not required to have a live-out value for a PHI.
716        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
717          WorkList.push_back(std::make_pair(Stop, PVNI));
718      }
719      continue;
720    }
721
722    // VNI is live-in to MBB.
723    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
724    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));
725
726    // Make sure VNI is live-out from the predecessors.
727    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
728         PE = MBB->pred_end(); PI != PE; ++PI) {
729      if (!LiveOut.insert(*PI))
730        continue;
731      SlotIndex Stop = getMBBEndIdx(*PI);
732      assert(li->getVNInfoBefore(Stop) == VNI &&
733             "Wrong value out of predecessor");
734      WorkList.push_back(std::make_pair(Stop, VNI));
735    }
736  }
737
738  // Handle dead values.
739  bool CanSeparate = false;
740  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
741       I != E; ++I) {
742    VNInfo *VNI = *I;
743    if (VNI->isUnused())
744      continue;
745    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
746    assert(LII != NewLI.end() && "Missing live range for PHI");
747    if (LII->end != VNI->def.getDeadSlot())
748      continue;
749    if (VNI->isPHIDef()) {
750      // This is a dead PHI. Remove it.
751      VNI->setIsUnused(true);
752      NewLI.removeRange(*LII);
753      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
754      CanSeparate = true;
755    } else {
756      // This is a dead def. Make sure the instruction knows.
757      MachineInstr *MI = getInstructionFromIndex(VNI->def);
758      assert(MI && "No instruction defining live value");
759      MI->addRegisterDead(li->reg, tri_);
760      if (dead && MI->allDefsAreDead()) {
761        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
762        dead->push_back(MI);
763      }
764    }
765  }
766
767  // Move the trimmed ranges back.
768  li->ranges.swap(NewLI.ranges);
769  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
770  return CanSeparate;
771}
772
773
774//===----------------------------------------------------------------------===//
775// Register allocator hooks.
776//
777
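/// addKillFlags - Add a kill flag to each instruction that ends a live range
/// of a virtual register, based on the computed intervals.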
778void LiveIntervals::addKillFlags() {
779  for (iterator I = begin(), E = end(); I != E; ++I) {
780    unsigned Reg = I->first;
781    if (TargetRegisterInfo::isPhysicalRegister(Reg))
782      continue;
783    if (mri_->reg_nodbg_empty(Reg))
784      continue;
785    LiveInterval *LI = I->second;
786
787    // Every instruction that kills Reg corresponds to a live range end point.
788    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
789         ++RI) {
790      // A block index indicates an MBB edge.
791      if (RI->end.isBlock())
792        continue;
793      MachineInstr *MI = getInstructionFromIndex(RI->end);
794      if (!MI)
795        continue;
796      MI->addRegisterKilled(Reg, NULL);
797    }
798  }
799}
800
801#ifndef NDEBUG
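// intervalRangesSane - Check that the ranges of li are non-empty, sorted and
// non-overlapping.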
802static bool intervalRangesSane(const LiveInterval& li) {
803  if (li.empty()) {
804    return true;
805  }
806
807  SlotIndex lastEnd = li.begin()->start;
808  for (LiveInterval::const_iterator lrItr = li.begin(), lrEnd = li.end();
809       lrItr != lrEnd; ++lrItr) {
810    const LiveRange& lr = *lrItr;
811    if (lastEnd > lr.start || lr.start >= lr.end)
812      return false;
813    lastEnd = lr.end;
814  }
815
816  return true;
817}
818#endif
819
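// handleMoveDefs - For each (plain) def, move the start of its live range and
// the definition slot of its value number from the old instruction index to
// the new one.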
820template <typename DefSetT>
821static void handleMoveDefs(LiveIntervals& lis, SlotIndex origIdx,
822                           SlotIndex miIdx, const DefSetT& defs) {
823  for (typename DefSetT::const_iterator defItr = defs.begin(),
824                                        defEnd = defs.end();
825       defItr != defEnd; ++defItr) {
826    unsigned def = *defItr;
827    LiveInterval& li = lis.getInterval(def);
828    LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
829    assert(lr != 0 && "No range for def?");
830    lr->start = miIdx.getRegSlot();
831    lr->valno->def = miIdx.getRegSlot();
832    assert(intervalRangesSane(li) && "Broke live interval moving def.");
833  }
834}
835
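// handleMoveDeadDefs - For each dead def, rebuild its single-slot dead range
// at the new instruction index.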
836template <typename DeadDefSetT>
837static void handleMoveDeadDefs(LiveIntervals& lis, SlotIndex origIdx,
838                               SlotIndex miIdx, const DeadDefSetT& deadDefs) {
839  for (typename DeadDefSetT::const_iterator deadDefItr = deadDefs.begin(),
840                                            deadDefEnd = deadDefs.end();
841       deadDefItr != deadDefEnd; ++deadDefItr) {
842    unsigned deadDef = *deadDefItr;
843    LiveInterval& li = lis.getInterval(deadDef);
844    LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
845    assert(lr != 0 && "No range for dead def?");
846    assert(lr->start == origIdx.getRegSlot() && "Bad dead range start?");
847    assert(lr->end == origIdx.getDeadSlot() && "Bad dead range end?");
848    assert(lr->valno->def == origIdx.getRegSlot() && "Bad dead valno def.");
849    LiveRange t(*lr);
850    t.start = miIdx.getRegSlot();
851    t.valno->def = miIdx.getRegSlot();
852    t.end = miIdx.getDeadSlot();
853    li.removeRange(*lr);
854    li.addRange(t);
855    assert(intervalRangesSane(li) && "Broke live interval moving dead def.");
856  }
857}
858
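// handleMoveECs - For each early-clobber def, rebuild its range (early-clobber
// slot to register slot) at the new instruction index.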
859template <typename ECSetT>
860static void handleMoveECs(LiveIntervals& lis, SlotIndex origIdx,
861                          SlotIndex miIdx, const ECSetT& ecs) {
862  for (typename ECSetT::const_iterator ecItr = ecs.begin(), ecEnd = ecs.end();
863       ecItr != ecEnd; ++ecItr) {
864    unsigned ec = *ecItr;
865    LiveInterval& li = lis.getInterval(ec);
866    LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot(true));
867    assert(lr != 0 && "No range for early clobber?");
868    assert(lr->start == origIdx.getRegSlot(true) && "Bad EC range start?");
869    assert(lr->end == origIdx.getRegSlot() && "Bad EC range end.");
870    assert(lr->valno->def == origIdx.getRegSlot(true) && "Bad EC valno def.");
871    LiveRange t(*lr);
872    t.start = miIdx.getRegSlot(true);
873    t.valno->def = miIdx.getRegSlot(true);
874    t.end = miIdx.getRegSlot();
875    li.removeRange(*lr);
876    li.addRange(t);
877    assert(intervalRangesSane(li) && "Broke live interval moving EC.");
878  }
879}
880
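// moveKillFlags - Transfer the kill flag for reg from the instruction at
// oldIdx to the instruction at newIdx.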
881static void moveKillFlags(unsigned reg, SlotIndex oldIdx, SlotIndex newIdx,
882                          LiveIntervals& lis,
883                          const TargetRegisterInfo& tri) {
884  MachineInstr* oldKillMI = lis.getInstructionFromIndex(oldIdx);
885  MachineInstr* newKillMI = lis.getInstructionFromIndex(newIdx);
886  assert(oldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
887  assert(!newKillMI->killsRegister(reg) && "New kill instr is already a kill.");
888  oldKillMI->clearRegisterKills(reg, &tri);
889  newKillMI->addRegisterKilled(reg, &tri);
890}
891
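// handleMoveUses - Update the live range endpoints of the registers read by
// the moved instruction, extending or trimming them depending on the direction
// of the move, and move kill flags as needed.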
892template <typename UseSetT>
893static void handleMoveUses(const MachineBasicBlock *mbb,
894                           const MachineRegisterInfo& mri,
895                           const TargetRegisterInfo& tri,
896                           const BitVector& reservedRegs, LiveIntervals &lis,
897                           SlotIndex origIdx, SlotIndex miIdx,
898                           const UseSetT &uses) {
899  bool movingUp = miIdx < origIdx;
900  for (typename UseSetT::const_iterator usesItr = uses.begin(),
901                                        usesEnd = uses.end();
902       usesItr != usesEnd; ++usesItr) {
903    unsigned use = *usesItr;
904    if (!lis.hasInterval(use))
905      continue;
906    if (TargetRegisterInfo::isPhysicalRegister(use) && reservedRegs.test(use))
907      continue;
908    LiveInterval& li = lis.getInterval(use);
909    LiveRange* lr = li.getLiveRangeBefore(origIdx.getRegSlot());
910    assert(lr != 0 && "No range for use?");
911    bool liveThrough = lr->end > origIdx.getRegSlot();
912
913    if (movingUp) {
914      // If moving up and liveThrough - nothing to do.
915      // If not live through we need to extend the range to the last use
916      // between the old location and the new one.
917      if (!liveThrough) {
918        SlotIndex lastUseInRange = miIdx.getRegSlot();
919        for (MachineRegisterInfo::use_iterator useI = mri.use_begin(use),
920                                               useE = mri.use_end();
921             useI != useE; ++useI) {
922          const MachineInstr* mopI = &*useI;
923          const MachineOperand& mop = useI.getOperand();
924          SlotIndex instSlot = lis.getSlotIndexes()->getInstructionIndex(mopI);
925          SlotIndex opSlot = instSlot.getRegSlot(mop.isEarlyClobber());
926          if (opSlot > lastUseInRange && opSlot < origIdx)
927            lastUseInRange = opSlot;
928        }
929
930        // If we found a new instr endpoint update the kill flags.
931        if (lastUseInRange != miIdx.getRegSlot())
932          moveKillFlags(use, miIdx, lastUseInRange, lis, tri);
933
934        // Fix up the range end.
935        lr->end = lastUseInRange;
936      }
937    } else {
938      // Moving down is easy - the existing live range end tells us where
939      // the last kill is.
940      if (!liveThrough) {
941        // Easy fix - just update the range endpoint.
942        lr->end = miIdx.getRegSlot();
943      } else {
944        bool liveOut = lr->end >= lis.getSlotIndexes()->getMBBEndIdx(mbb);
945        if (!liveOut && miIdx.getRegSlot() > lr->end) {
946          moveKillFlags(use, lr->end, miIdx, lis, tri);
947          lr->end = miIdx.getRegSlot();
948        }
949      }
950    }
951    assert(intervalRangesSane(li) && "Broke live interval moving use.");
952  }
953}
954
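/// moveInstr - Move mi to just before insertPt within its basic block and
/// update the live intervals of every register it reads or writes.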
955void LiveIntervals::moveInstr(MachineBasicBlock::iterator insertPt,
956                              MachineInstr *mi) {
957  MachineBasicBlock* mbb = mi->getParent();
958  assert((insertPt == mbb->end() || insertPt->getParent() == mbb) &&
959         "Cannot handle moves across basic block boundaries.");
960  assert(&*insertPt != mi && "No-op move requested?");
961  assert(!mi->isBundled() && "Can't handle bundled instructions yet.");
962
963  // Grab the original instruction index.
964  SlotIndex origIdx = indexes_->getInstructionIndex(mi);
965
966  // Move the machine instr and obtain its new index.
967  indexes_->removeMachineInstrFromMaps(mi);
968  mbb->splice(insertPt, mbb, mi);
969  SlotIndex miIdx = indexes_->insertMachineInstrInMaps(mi);
970
971  // Pick the direction.
972  bool movingUp = miIdx < origIdx;
973
974  // Collect the operands.
975  DenseSet<unsigned> uses, defs, deadDefs, ecs;
976  for (MachineInstr::mop_iterator mopItr = mi->operands_begin(),
977         mopEnd = mi->operands_end();
978       mopItr != mopEnd; ++mopItr) {
979    const MachineOperand& mop = *mopItr;
980
981    if (!mop.isReg() || mop.getReg() == 0)
982      continue;
983    unsigned reg = mop.getReg();
984
985    if (mop.readsReg() && !ecs.count(reg)) {
986      uses.insert(reg);
987    }
988    if (mop.isDef()) {
989      if (mop.isDead()) {
990        assert(!defs.count(reg) && "Can't mix defs with dead-defs.");
991        deadDefs.insert(reg);
992      } else if (mop.isEarlyClobber()) {
993        uses.erase(reg);
994        ecs.insert(reg);
995      } else {
996        assert(!deadDefs.count(reg) && "Can't mix defs with dead-defs.");
997        defs.insert(reg);
998      }
999    }
1000  }
1001
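  // Update the affected live ranges. Note that the per-kind handlers are run
  // in opposite orders for upward and downward moves.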
1002  if (movingUp) {
1003    handleMoveUses(mbb, *mri_, *tri_, reservedRegs_, *this, origIdx, miIdx, uses);
1004    handleMoveECs(*this, origIdx, miIdx, ecs);
1005    handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
1006    handleMoveDefs(*this, origIdx, miIdx, defs);
1007  } else {
1008    handleMoveDefs(*this, origIdx, miIdx, defs);
1009    handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
1010    handleMoveECs(*this, origIdx, miIdx, ecs);
1011    handleMoveUses(mbb, *mri_, *tri_, reservedRegs_, *this, origIdx, miIdx, uses);
1012  }
1013}
1014
1015/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1016/// allow one) virtual register operand, then its uses are implicitly using
1017/// the register. Returns the virtual register, or 0 if there is none.
1018unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1019                                            MachineInstr *MI) const {
1020  unsigned RegOp = 0;
1021  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1022    MachineOperand &MO = MI->getOperand(i);
1023    if (!MO.isReg() || !MO.isUse())
1024      continue;
1025    unsigned Reg = MO.getReg();
1026    if (Reg == 0 || Reg == li.reg)
1027      continue;
1028
1029    if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isAllocatable(Reg))
1030      continue;
1031    RegOp = MO.getReg();
1032    break; // Found vreg operand - leave the loop.
1033  }
1034  return RegOp;
1035}
1036
1037/// isValNoAvailableAt - Return true if the val# of the specified interval
1038/// which reaches the given instruction also reaches the specified use index.
1039bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1040                                       SlotIndex UseIdx) const {
1041  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
1042  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
1043}
1044
1045/// isReMaterializable - Returns true if the definition MI of the specified
1046/// val# of the specified interval is re-materializable.
1047bool
1048LiveIntervals::isReMaterializable(const LiveInterval &li,
1049                                  const VNInfo *ValNo, MachineInstr *MI,
1050                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
1051                                  bool &isLoad) {
1052  if (DisableReMat)
1053    return false;
1054
1055  if (!tii_->isTriviallyReMaterializable(MI, aa_))
1056    return false;
1057
1058  // Target-specific code can mark an instruction as being rematerializable
1059  // if it has one virtual reg use, though it had better be something like
1060  // a PIC base register which is likely to be live everywhere.
1061  unsigned ImpUse = getReMatImplicitUse(li, MI);
1062  if (ImpUse) {
1063    const LiveInterval &ImpLi = getInterval(ImpUse);
1064    for (MachineRegisterInfo::use_nodbg_iterator
1065           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
1066         ri != re; ++ri) {
1067      MachineInstr *UseMI = &*ri;
1068      SlotIndex UseIdx = getInstructionIndex(UseMI);
1069      if (li.getVNInfoAt(UseIdx) != ValNo)
1070        continue;
1071      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1072        return false;
1073    }
1074
1075    // If a register operand of the re-materialized instruction is going to
1076    // be spilled next, then it's not legal to re-materialize this instruction.
1077    if (SpillIs)
1078      for (unsigned i = 0, e = SpillIs->size(); i != e; ++i)
1079        if (ImpUse == (*SpillIs)[i]->reg)
1080          return false;
1081  }
1082  return true;
1083}
1084
1085/// isReMaterializable - Returns true if the defining MI of every val# of the
1086/// specified interval is re-materializable.
1087bool
1088LiveIntervals::isReMaterializable(const LiveInterval &li,
1089                                  const SmallVectorImpl<LiveInterval*> *SpillIs,
1090                                  bool &isLoad) {
1091  isLoad = false;
1092  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1093       i != e; ++i) {
1094    const VNInfo *VNI = *i;
1095    if (VNI->isUnused())
1096      continue; // Dead val#.
1097    // Is the def for the val# rematerializable?
1098    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1099    if (!ReMatDefMI)
1100      return false;
1101    bool DefIsLoad = false;
1102    if (!ReMatDefMI ||
1103        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1104      return false;
1105    isLoad |= DefIsLoad;
1106  }
1107  return true;
1108}
1109
1110MachineBasicBlock*
1111LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
1112  // A local live range must be fully contained inside the block, meaning it is
1113  // defined and killed at instructions, not at block boundaries. It is not
1114  // live in or out of any block.
1115  //
1116  // It is technically possible to have a PHI-defined live range identical to a
1117  // single block, but we are going to return false in that case.
1118
1119  SlotIndex Start = LI.beginIndex();
1120  if (Start.isBlock())
1121    return NULL;
1122
1123  SlotIndex Stop = LI.endIndex();
1124  if (Stop.isBlock())
1125    return NULL;
1126
1127  // getMBBFromIndex doesn't need to search the MBB table when both indexes
1128  // belong to proper instructions.
1129  MachineBasicBlock *MBB1 = indexes_->getMBBFromIndex(Start);
1130  MachineBasicBlock *MBB2 = indexes_->getMBBFromIndex(Stop);
1131  return MBB1 == MBB2 ? MBB1 : NULL;
1132}
1133
1134float
1135LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1136  // Limit the loop depth ridiculousness.
1137  if (loopDepth > 200)
1138    loopDepth = 200;
1139
1140  // The loop depth is used to roughly estimate the number of times the
1141  // instruction is executed. Something like 10^d is simple, but will quickly
1142  // overflow a float. This expression behaves like 10^d for small d, but is
1143  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1144  // headroom before overflow.
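  // For example, loopDepth==1 gives (1 + 100/11)^1, roughly 10, and
  // loopDepth==2 gives (1 + 100/12)^2, roughly 87.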
1145  // Note: powf() may be unavailable on some hosts, so use the
1146  // pow(double, double) overload for consistency.
1147  float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
1148
1149  return (isDef + isUse) * lc;
1150}
1151
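/// addLiveRangeToEndOfBlock - Give reg a new value number defined at startInst
/// and add a range from that def to the end of startInst's basic block.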
1152LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1153                                                  MachineInstr* startInst) {
1154  LiveInterval& Interval = getOrCreateInterval(reg);
1155  VNInfo* VN = Interval.getNextValue(
1156    SlotIndex(getInstructionIndex(startInst).getRegSlot()),
1157    getVNInfoAllocator());
1158  VN->setHasPHIKill(true);
1159  LiveRange LR(
1160     SlotIndex(getInstructionIndex(startInst).getRegSlot()),
1161     getMBBEndIdx(startInst->getParent()), VN);
1162  Interval.addRange(LR);
1163
1164  return LR;
1165}
1166
1167
1168//===----------------------------------------------------------------------===//
1169//                          Register mask functions
1170//===----------------------------------------------------------------------===//
1171
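/// checkRegMaskInterference - Return true if LI overlaps any register mask
/// slot (e.g. a call's clobber mask). If so, UsableRegs is filled with the
/// registers that remain usable across all overlapping masks.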
1172bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
1173                                             BitVector &UsableRegs) {
1174  if (LI.empty())
1175    return false;
1176  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();
1177
1178  // Use smaller arrays for local live ranges.
1179  ArrayRef<SlotIndex> Slots;
1180  ArrayRef<const uint32_t*> Bits;
1181  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
1182    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
1183    Bits = getRegMaskBitsInBlock(MBB->getNumber());
1184  } else {
1185    Slots = getRegMaskSlots();
1186    Bits = getRegMaskBits();
1187  }
1188
1189  // We are going to enumerate all the register mask slots contained in LI.
1190  // Start with a binary search of RegMaskSlots to find a starting point.
1191  ArrayRef<SlotIndex>::iterator SlotI =
1192    std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
1193  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
1194
1195  // No slots in range, LI begins after the last call.
1196  if (SlotI == SlotE)
1197    return false;
1198
1199  bool Found = false;
1200  for (;;) {
1201    assert(*SlotI >= LiveI->start);
1202    // Loop over all slots overlapping this segment.
1203    while (*SlotI < LiveI->end) {
1204      // *SlotI overlaps LI. Collect mask bits.
1205      if (!Found) {
1206        // This is the first overlap. Initialize UsableRegs to all ones.
1207        UsableRegs.clear();
1208        UsableRegs.resize(tri_->getNumRegs(), true);
1209        Found = true;
1210      }
1211      // Remove usable registers clobbered by this mask.
1212      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
1213      if (++SlotI == SlotE)
1214        return Found;
1215    }
1216    // *SlotI is beyond the current LI segment.
1217    LiveI = LI.advanceTo(LiveI, *SlotI);
1218    if (LiveI == LiveE)
1219      return Found;
1220    // Advance SlotI until it overlaps.
1221    while (*SlotI < LiveI->start)
1222      if (++SlotI == SlotE)
1223        return Found;
1224  }
1225}
1226