LiveIntervalAnalysis.cpp revision 826cbac2a0cef418fd8949813761c2ed975f3df1
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << SlotIndex::getEmptyKey() << '\t' << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
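  // A note on the index arithmetic below: each instruction index is
  // subdivided into load/use/def/store sub-slots (see SlotIndexes), and the
  // code rounds the range down to whole base indexes so it can be walked one
  // instruction at a time. Illustratively (indexes invented), a range
  // [4def, 12use) is scanned as the base-index range [4, 12).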
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
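  // For example (a made-up sketch), two-address rewriting can produce:
  //   %reg1024<def> = ...                       ; first def: interval empty
  //   %reg1024<def> = ADD %reg1024<use>, ...    ; redef: interval non-empty
  // The first def is handled by the branch below; the redefinition falls
  // into the else-branch further down.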
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // A two-address vreg should only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  MachineBasicBlock::iterator E = MBB->end();
  while (mi != E) {
    while (mi != E && mi->isDebugValue())
      ++mi;
    if (mi == E)
      break;
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != E && !mi->isDebugValue()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
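/// For example (indexes invented for illustration), a vreg defined at
/// instruction 4 and last used at instruction 12 is live in [4, 12).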
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
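/// A typical example (illustrative) is a PIC-relative load: rematerializing
/// it implicitly requires the PIC base vreg to be live at the new location.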
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
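/// For example (illustrative), given the two-address instruction
///   %r<def> = ADD %r<use,tied>, %x
/// the tied use of %r is skipped (not added to FoldOps) and MRInfo records
/// the access as isModRef.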
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
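/// For instance (an x86-flavored sketch, not from this file), a reload
///   %reg1024 = MOV32rm <fi#4>
/// feeding
///   %reg1025 = ADD32rr %reg1025, %reg1024
/// may be folded into
///   %reg1025 = ADD32rm %reg1025, <fi#4>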
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
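
/// anyKillInMBBAfterIdx - Return true if the given val# of the given live
/// interval has a non-PHI kill in MBB after the given index.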
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1261
1262void LiveIntervals::
1263rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1264                    LiveInterval::Ranges::const_iterator &I,
1265                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1266                    unsigned Slot, int LdSlot,
1267                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1268                    VirtRegMap &vrm,
1269                    const TargetRegisterClass* rc,
1270                    SmallVector<int, 4> &ReMatIds,
1271                    const MachineLoopInfo *loopInfo,
1272                    BitVector &SpillMBBs,
1273                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1274                    BitVector &RestoreMBBs,
1275                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1276                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1277                    std::vector<LiveInterval*> &NewLIs) {
1278  bool AllCanFold = true;
1279  unsigned NewVReg = 0;
1280  SlotIndex start = I->start.getBaseIndex();
1281  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1282
1283  // First collect all the def / use in this live range that will be rewritten.
1284  // Make sure they are sorted according to instruction index.
1285  std::vector<RewriteInfo> RewriteMIs;
1286  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1287         re = mri_->reg_end(); ri != re; ) {
1288    MachineInstr *MI = &*ri;
1289    MachineOperand &O = ri.getOperand();
1290    ++ri;
1291    if (MI->isDebugValue()) {
1292      // Remove debug info for now.
1293      O.setReg(0U);
1294      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1295      continue;
1296    }
1297    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1298    SlotIndex index = getInstructionIndex(MI);
1299    if (index < start || index >= end)
1300      continue;
1301
1302    if (O.isUndef())
1303      // Must be defined by an implicit def. It should not be spilled. Note,
1304      // this is for correctness reason. e.g.
1305      // 8   %reg1024<def> = IMPLICIT_DEF
1306      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1307      // The live range [12, 14) are not part of the r1024 live interval since
1308      // it's defined by an implicit def. It will not conflicts with live
1309      // interval of r1025. Now suppose both registers are spilled, you can
1310      // easily see a situation where both registers are reloaded before
1311      // the INSERT_SUBREG and both target registers that would overlap.
1312      continue;
1313    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1314  }
1315  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1316
1317  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1318  // Now rewrite the defs and uses.
1319  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1320    RewriteInfo &rwi = RewriteMIs[i];
1321    ++i;
1322    SlotIndex index = rwi.Index;
1323    bool MIHasUse = rwi.HasUse;
1324    bool MIHasDef = rwi.HasDef;
1325    MachineInstr *MI = rwi.MI;
1326    // If MI def and/or use the same register multiple times, then there
1327    // are multiple entries.
1328    unsigned NumUses = MIHasUse;
1329    while (i != e && RewriteMIs[i].MI == MI) {
1330      assert(RewriteMIs[i].Index == index);
1331      bool isUse = RewriteMIs[i].HasUse;
1332      if (isUse) ++NumUses;
1333      MIHasUse |= isUse;
1334      MIHasDef |= RewriteMIs[i].HasDef;
1335      ++i;
1336    }
1337    MachineBasicBlock *MBB = MI->getParent();
1338
1339    if (ImpUse && MI != ReMatDefMI) {
1340      // Re-matting an instruction with virtual register use. Prevent interval
1341      // from being spilled.
1342      getInterval(ImpUse).markNotSpillable();
1343    }
1344
1345    unsigned MBBId = MBB->getNumber();
1346    unsigned ThisVReg = 0;
1347    if (TrySplit) {
1348      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1349      if (NVI != MBBVRegsMap.end()) {
1350        ThisVReg = NVI->second;
1351        // One common case:
1352        // x = use
1353        // ...
1354        // ...
1355        // def = ...
1356        //     = use
1357        // It's better to start a new interval to avoid artifically
1358        // extend the new interval.
1359        if (MIHasDef && !MIHasUse) {
1360          MBBVRegsMap.erase(MBB->getNumber());
1361          ThisVReg = 0;
1362        }
1363      }
1364    }
1365
1366    bool IsNew = ThisVReg == 0;
1367    if (IsNew) {
1368      // This ends the previous live interval. If all of its def / use
1369      // can be folded, give it a low spill weight.
1370      if (NewVReg && TrySplit && AllCanFold) {
1371        LiveInterval &nI = getOrCreateInterval(NewVReg);
1372        nI.weight /= 10.0F;
1373      }
1374      AllCanFold = true;
1375    }
1376    NewVReg = ThisVReg;
1377
1378    bool HasDef = false;
1379    bool HasUse = false;
1380    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1381                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1382                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1383                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1384                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1385    if (!HasDef && !HasUse)
1386      continue;
1387
1388    AllCanFold &= CanFold;
1389
1390    // Update weight of spill interval.
1391    LiveInterval &nI = getOrCreateInterval(NewVReg);
1392    if (!TrySplit) {
1393      // The spill weight is now infinity as it cannot be spilled again.
1394      nI.markNotSpillable();
1395      continue;
1396    }
1397
1398    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
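  // A few sample values (approximate): d=1 gives ~10.1, d=2 gives ~87,
  // d=3 gives ~657, and d=4 gives ~4.4e3 -- close to 10^d at first, then
  // falling progressively further behind it. The (isDef + isUse) factor
  // below simply counts the 0, 1 or 2 accesses the instruction makes.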
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting; otherwise we're done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

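  // First insert the spill points: for each block that needs a spill, try to
  // fold the store into the instruction at the spill index; if that fails,
  // record a spill point so the spiller will insert an explicit store.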
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

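  // Then the restore points: fold the load (or the rematerialized def, if it
  // is itself a load) into the use where possible; otherwise record a restore
  // point so the spiller will issue a load / rematerialization.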
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super-register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super-register of the specified
/// physical register.
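/// For example, on x86-64 a request for AX would typically return RAX,
/// assuming RAX is allocatable and has a live interval.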
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super-register, assert: we can't handle
    // it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }
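  // At this point PRegs holds the registers whose intervals must be cut:
  // either SpillReg itself, or, if SpillReg has no interval of its own, its
  // outermost sub-registers that do (e.g. AX standing in for EAX).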

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

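/// addLiveRangeToEndOfBlock - Create a live range extending from the def of
/// startInst to the end of its basic block, add it to reg's interval, and
/// return a copy of the new range.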
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}