LiveIntervalAnalysis.cpp revision 3de23e6f6cf337451a0934159da494d645b93133
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
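
// Note on indexing: SlotIndexes assigns every instruction four sub-slots
// (load, use, def, store, in that order). getBaseIndex() returns the first
// of these, so the loops below commonly step from one base index to the
// next. An ordinary def starts at the def slot; an earlyclobber def is
// moved back to the use slot so that it overlaps the inputs it clobbers.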

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));

static cl::opt<int> CoalescingLimit("early-coalescing-limit",
                                    cl::init(-1), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
STATISTIC(numCoalescing, "Number of early coalescing performed");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();
  phiJoinCopies.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();
  performEarlyCoalescing();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
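  // Walk each live range one instruction at a time: start at the base
  // (load) slot of the first instruction in the range and stop one base
  // index past the last instruction the range overlaps.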
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index = index.getNextIndex();
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef, except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = index.getNextIndex();
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        phiJoinCopies.push_back(Killer);
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
                       0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
                 "Two address instruction is an early clobber?");
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that defines
          // it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used.
  // Another possible case is that the implicit use of the physical
  // register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

bool LiveIntervals::
isSafeAndProfitableToCoalesce(LiveInterval &DstInt,
                              LiveInterval &SrcInt,
                              SmallVector<MachineInstr*,16> &IdentCopies,
                              SmallVector<MachineInstr*,16> &OtherCopies) {
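  // Partition the copies that define SrcInt: identity copies (whose source
  // is DstInt.reg) can simply be erased, while the others must be
  // retargeted. Coalescing is only treated as profitable when the identity
  // copies outnumber the rest.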
  unsigned NumIdent = 0;
  for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
         re = mri_->def_end(); ri != re; ++ri) {
    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      return false;
    if (SrcReg != DstInt.reg) {
      // Non-identity copy - we cannot handle overlapping intervals
      if (DstInt.liveAt(getInstructionIndex(MI)))
        return false;
      OtherCopies.push_back(MI);
    } else {
      IdentCopies.push_back(MI);
      ++NumIdent;
    }
  }

  return IdentCopies.size() > OtherCopies.size();
}

void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
    return;

  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
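  ///
  /// An illustrative candidate (pseudo machine code; register numbers are
  /// made up), with %reg1024 as the phi destination and %reg1025 as the
  /// phi source:
  ///   bb0:  %reg1025 = COPY %reg1024   ; identity copy, erased below
  ///   bb1:  %reg1025 = COPY %reg1026   ; retargeted to define %reg1024
  ///   join: %reg1024 = COPY %reg1025   ; the phi join, removed at the end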
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
      break;

    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove = tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
#ifndef NDEBUG
    assert(isMove && "PHI join instruction must be a move!");
#else
    isMove = isMove; // Silence set-but-unused warning in release builds.
#endif

    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    if (!isSafeAndProfitableToCoalesce(DstInt, SrcInt,
                                       IdentCopies, OtherCopies))
      continue;

    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    assert(std::distance(mri_->use_begin(PHISrc), mri_->use_end()) == 1 &&
           "PHI join src should not be used elsewhere");
    VNInfo *VNI = DstInt.getValNumInfo(0);

    // Change the non-identity copies to directly target the phi destination.
    for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = OtherCopies[i];
      SlotIndex MIIndex = getInstructionIndex(PHICopy);
      DEBUG(errs() << "Moving: " << MIIndex << ' ' << *PHICopy);
      SlotIndex DefIndex = MIIndex.getDefIndex();
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      SlotIndex StartIndex = SLR->start;
      SlotIndex EndIndex = SLR->end;

      // Delete the val# defined by this copy and transfer its range to the
      // phi destination interval.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      assert (!DstInt.liveAt(StartIndex) && "Cannot coalesce when dst live!");
      VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
                                           VNInfoAllocator);
      NewVNI->setHasPHIKill(true);
      DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = PHICopy->getOperand(j);
        if (!MO.isReg() || MO.getReg() != PHISrc)
          continue;
        MO.setReg(PHIDst);
      }
    }

    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);

      SlotIndex MIIndex = getInstructionIndex(PHICopy);
      SlotIndex DefIndex = MIIndex.getDefIndex();
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      SlotIndex StartIndex = SLR->start;
      SlotIndex EndIndex = SLR->end;

      // Delete val# defined by the now identity copy and add the range from
      // beginning of the mbb to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
      DEBUG(errs() << "  added range [" << StartIndex << ','
            << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
    }

    // Remove the phi join and update the phi block liveness.
    SlotIndex MIIndex = getInstructionIndex(Join);
    SlotIndex UseIndex = MIIndex.getUseIndex();
    SlotIndex DefIndex = MIIndex.getDefIndex();
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    DLR->valno->setCopy(0);
    DLR->valno->setIsDefAccurate(false);
    DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
    SrcInt.removeRange(SLR->start, SLR->end);
    assert(SrcInt.empty());
    removeInterval(PHISrc);
    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();

    ++numCoalescing;
  }
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
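/// For example, a variable defined at instruction 2 and last used at
/// instruction 5 is live on [2, 6) under such an ordering; the interval
/// is half-open.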
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
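  // Physical register intervals get an infinite spill weight so the
  // register allocator will never try to spill them.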
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the defining instruction of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot or a rematerialized load into the specified operands of
/// MI. If successful, MI is updated with the newly created MI and this
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
1176        RemoveMachineInstrFromMaps(MI);
1177        vrm.RemoveMachineInstrFromMaps(MI);
1178        MI->eraseFromParent();
1179        break;
1180      }
1181
1182      // If def for this use can't be rematerialized, then try folding.
1183      // If def is rematerializable and it's a load, also try folding.
1184      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1185      if (isLoad) {
1186        // Try fold loads (from stack slot, constant pool, etc.) into uses.
1187        FoldSS = isLoadSS;
1188        FoldSlot = LdSlot;
1189      }
1190    }
1191
1192    // Scan all of the operands of this instruction rewriting operands
1193    // to use NewVReg instead of li.reg as appropriate.  We do this for
1194    // two reasons:
1195    //
1196    //   1. If the instr reads the same spilled vreg multiple times, we
1197    //      want to reuse the NewVReg.
1198    //   2. If the instr is a two-addr instruction, we are required to
1199    //      keep the src/dst regs pinned.
1200    //
1201    // Keep track of whether we replace a use and/or def so that we can
1202    // create the spill interval with the appropriate range.
1203
1204    HasUse = mop.isUse();
1205    HasDef = mop.isDef();
1206    SmallVector<unsigned, 2> Ops;
1207    Ops.push_back(i);
1208    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1209      const MachineOperand &MOj = MI->getOperand(j);
1210      if (!MOj.isReg())
1211        continue;
1212      unsigned RegJ = MOj.getReg();
1213      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1214        continue;
1215      if (RegJ == RegI) {
1216        Ops.push_back(j);
1217        if (!MOj.isUndef()) {
1218          HasUse |= MOj.isUse();
1219          HasDef |= MOj.isDef();
1220        }
1221      }
1222    }
1223
1224    // Create a new virtual register for the spill interval.
1225    // Create the new register now so we can map the fold instruction
1226    // to the new register so when it is unfolded we get the correct
1227    // answer.
1228    bool CreatedNewVReg = false;
1229    if (NewVReg == 0) {
1230      NewVReg = mri_->createVirtualRegister(rc);
1231      vrm.grow();
1232      CreatedNewVReg = true;
1233    }
1234
1235    if (!TryFold)
1236      CanFold = false;
1237    else {
1238      // Do not fold load / store here if we are splitting. We'll find an
1239      // optimal point to insert a load / store later.
1240      if (!TrySplit) {
1241        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1242                                 Ops, FoldSS, FoldSlot, NewVReg)) {
1243          // Folding the load/store can completely change the instruction in
1244          // unpredictable ways, rescan it from the beginning.
1245
1246          if (FoldSS) {
1247            // We need to give the new vreg the same stack slot as the
1248            // spilled interval.
1249            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1250          }
1251
1252          HasUse = false;
1253          HasDef = false;
1254          CanFold = false;
1255          if (isNotInMIMap(MI))
1256            break;
1257          goto RestartInstruction;
1258        }
1259      } else {
1260        // We'll try to fold it later if it's profitable.
1261        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1262      }
1263    }
1264
1265    mop.setReg(NewVReg);
1266    if (mop.isImplicit())
1267      rewriteImplicitOps(li, MI, NewVReg, vrm);
1268
1269    // Reuse NewVReg for other reads.
1270    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1271      MachineOperand &mopj = MI->getOperand(Ops[j]);
1272      mopj.setReg(NewVReg);
1273      if (mopj.isImplicit())
1274        rewriteImplicitOps(li, MI, NewVReg, vrm);
1275    }
1276
1277    if (CreatedNewVReg) {
1278      if (DefIsReMat) {
1279        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1280        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1281          // Each valnum may have its own remat id.
1282          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1283        } else {
1284          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1285        }
1286        if (!CanDelete || (HasUse && HasDef)) {
1287          // If this is a two-addr instruction then its use operands are
1288          // rematerializable but its def is not. It should be assigned a
1289          // stack slot.
1290          vrm.assignVirt2StackSlot(NewVReg, Slot);
1291        }
1292      } else {
1293        vrm.assignVirt2StackSlot(NewVReg, Slot);
1294      }
1295    } else if (HasUse && HasDef &&
1296               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1297      // If this interval hasn't been assigned a stack slot (because earlier
1298      // def is a deleted remat def), do it now.
1299      assert(Slot != VirtRegMap::NO_STACK_SLOT);
1300      vrm.assignVirt2StackSlot(NewVReg, Slot);
1301    }
1302
1303    // Re-matting an instruction with virtual register use. Add the
1304    // register as an implicit use on the use MI.
1305    if (DefIsReMat && ImpUse)
1306      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1307
1308    // Create a new register interval for this spill / remat.
1309    LiveInterval &nI = getOrCreateInterval(NewVReg);
1310    if (CreatedNewVReg) {
1311      NewLIs.push_back(&nI);
1312      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1313      if (TrySplit)
1314        vrm.setIsSplitFromReg(NewVReg, li.reg);
1315    }
1316
1317    if (HasUse) {
1318      if (CreatedNewVReg) {
1319        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1320                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1321        DEBUG(errs() << " +" << LR);
1322        nI.addRange(LR);
1323      } else {
1324        // Extend the split live interval to this def / use.
1325        SlotIndex End = index.getDefIndex();
1326        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1327                     nI.getValNumInfo(nI.getNumValNums()-1));
1328        DEBUG(errs() << " +" << LR);
1329        nI.addRange(LR);
1330      }
1331    }
1332    if (HasDef) {
1333      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1334                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1335      DEBUG(errs() << " +" << LR);
1336      nI.addRange(LR);
1337    }
1338
1339    DEBUG({
1340        errs() << "\t\t\t\tAdded new interval: ";
1341        nI.print(errs(), tri_);
1342        errs() << '\n';
1343      });
1344  }
1345  return CanFold;
1346}
1347bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1348                                   const VNInfo *VNI,
1349                                   MachineBasicBlock *MBB,
1350                                   SlotIndex Idx) const {
1351  SlotIndex End = getMBBEndIdx(MBB);
1352  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1353    if (VNI->kills[j].isPHI())
1354      continue;
1355
1356    SlotIndex KillIdx = VNI->kills[j];
1357    if (KillIdx > Idx && KillIdx < End)
1358      return true;
1359  }
1360  return false;
1361}
1362
1363/// RewriteInfo - Keep track of machine instrs that will be rewritten
1364/// during spilling.
1365namespace {
1366  struct RewriteInfo {
1367    SlotIndex Index;
1368    MachineInstr *MI;
1369    bool HasUse;
1370    bool HasDef;
1371    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1372      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1373  };
1374
1375  struct RewriteInfoCompare {
1376    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1377      return LHS.Index < RHS.Index;
1378    }
1379  };
1380}
1381
1382void LiveIntervals::
1383rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1384                    LiveInterval::Ranges::const_iterator &I,
1385                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1386                    unsigned Slot, int LdSlot,
1387                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1388                    VirtRegMap &vrm,
1389                    const TargetRegisterClass* rc,
1390                    SmallVector<int, 4> &ReMatIds,
1391                    const MachineLoopInfo *loopInfo,
1392                    BitVector &SpillMBBs,
1393                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1394                    BitVector &RestoreMBBs,
1395                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1396                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1397                    std::vector<LiveInterval*> &NewLIs) {
1398  bool AllCanFold = true;
1399  unsigned NewVReg = 0;
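  // Compute the instruction-index bounds of this range: start is rounded
  // down to its base index, and end is rounded up to the base index of the
  // next instruction, so the half-open filter below (start <= index < end)
  // covers every instruction overlapping the range.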
1400  SlotIndex start = I->start.getBaseIndex();
1401  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1402
1403  // First collect all the defs / uses in this live range that will be
1404  // rewritten. Make sure they are sorted according to instruction index.
1405  std::vector<RewriteInfo> RewriteMIs;
1406  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1407         re = mri_->reg_end(); ri != re; ) {
1408    MachineInstr *MI = &*ri;
1409    MachineOperand &O = ri.getOperand();
1410    ++ri;
1411    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1412    SlotIndex index = getInstructionIndex(MI);
1413    if (index < start || index >= end)
1414      continue;
1415
1416    if (O.isUndef())
1417      // Must be defined by an implicit def. It should not be spilled. Note,
1418      // this is for correctness reasons, e.g.
1419      // 8   %reg1024<def> = IMPLICIT_DEF
1420      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1421      // The live range [12, 14) is not part of the r1024 live interval since
1422      // it's defined by an implicit def. It will not conflict with the live
1423      // interval of r1025. Now suppose both registers are spilled: both could
1424      // easily be reloaded before the INSERT_SUBREG, into target registers
1425      // that overlap.
1426      continue;
1427    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1428  }
1429  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1430
1431  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1432  // Now rewrite the defs and uses.
1433  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1434    RewriteInfo &rwi = RewriteMIs[i];
1435    ++i;
1436    SlotIndex index = rwi.Index;
1437    bool MIHasUse = rwi.HasUse;
1438    bool MIHasDef = rwi.HasDef;
1439    MachineInstr *MI = rwi.MI;
1440    // If MI defs and/or uses the same register multiple times, then there
1441    // are multiple entries.
1442    unsigned NumUses = MIHasUse;
1443    while (i != e && RewriteMIs[i].MI == MI) {
1444      assert(RewriteMIs[i].Index == index);
1445      bool isUse = RewriteMIs[i].HasUse;
1446      if (isUse) ++NumUses;
1447      MIHasUse |= isUse;
1448      MIHasDef |= RewriteMIs[i].HasDef;
1449      ++i;
1450    }
1451    MachineBasicBlock *MBB = MI->getParent();
1452
1453    if (ImpUse && MI != ReMatDefMI) {
1454      // Re-matting an instruction with a virtual register use. Update the
1455      // register interval's spill weight to HUGE_VALF to prevent it from
1456      // being spilled.
1457      LiveInterval &ImpLi = getInterval(ImpUse);
1458      ImpLi.weight = HUGE_VALF;
1459    }
1460
1461    unsigned MBBId = MBB->getNumber();
1462    unsigned ThisVReg = 0;
1463    if (TrySplit) {
1464      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1465      if (NVI != MBBVRegsMap.end()) {
1466        ThisVReg = NVI->second;
1467        // One common case:
1468        // x = use
1469        // ...
1470        // ...
1471        // def = ...
1472        //     = use
1473        // It's better to start a new interval to avoid artificially
1474        // extending the new interval.
1475        if (MIHasDef && !MIHasUse) {
1476          MBBVRegsMap.erase(MBB->getNumber());
1477          ThisVReg = 0;
1478        }
1479      }
1480    }
1481
1482    bool IsNew = ThisVReg == 0;
1483    if (IsNew) {
1484      // This ends the previous live interval. If all of its def / use
1485      // can be folded, give it a low spill weight.
1486      if (NewVReg && TrySplit && AllCanFold) {
1487        LiveInterval &nI = getOrCreateInterval(NewVReg);
1488        nI.weight /= 10.0F;
1489      }
1490      AllCanFold = true;
1491    }
1492    NewVReg = ThisVReg;
1493
1494    bool HasDef = false;
1495    bool HasUse = false;
1496    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1497                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1498                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1499                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1500                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1501    if (!HasDef && !HasUse)
1502      continue;
1503
1504    AllCanFold &= CanFold;
1505
1506    // Update weight of spill interval.
1507    LiveInterval &nI = getOrCreateInterval(NewVReg);
1508    if (!TrySplit) {
1509      // The spill weight is now infinity as it cannot be spilled again.
1510      nI.weight = HUGE_VALF;
1511      continue;
1512    }
1513
1514    // Keep track of the last def and first use in each MBB.
1515    if (HasDef) {
1516      if (MI != ReMatOrigDefMI || !CanDelete) {
1517        bool HasKill = false;
1518        if (!HasUse)
1519          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1520        else {
1521          // If this is two-address code, then this index starts a new VNInfo.
1522          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1523          if (VNI)
1524            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1525        }
1526        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1527          SpillIdxes.find(MBBId);
1528        if (!HasKill) {
1529          if (SII == SpillIdxes.end()) {
1530            std::vector<SRInfo> S;
1531            S.push_back(SRInfo(index, NewVReg, true));
1532            SpillIdxes.insert(std::make_pair(MBBId, S));
1533          } else if (SII->second.back().vreg != NewVReg) {
1534            SII->second.push_back(SRInfo(index, NewVReg, true));
1535          } else if (index > SII->second.back().index) {
1536            // If there is an earlier def and this is a two-address
1537            // instruction, then it's not possible to fold the store (which
1538            // would also fold the load).
1539            SRInfo &Info = SII->second.back();
1540            Info.index = index;
1541            Info.canFold = !HasUse;
1542          }
1543          SpillMBBs.set(MBBId);
1544        } else if (SII != SpillIdxes.end() &&
1545                   SII->second.back().vreg == NewVReg &&
1546                   index > SII->second.back().index) {
1547          // There is an earlier def that's not killed (must be two-address).
1548          // The spill is no longer needed.
1549          SII->second.pop_back();
1550          if (SII->second.empty()) {
1551            SpillIdxes.erase(MBBId);
1552            SpillMBBs.reset(MBBId);
1553          }
1554        }
1555      }
1556    }
1557
1558    if (HasUse) {
1559      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1560        SpillIdxes.find(MBBId);
1561      if (SII != SpillIdxes.end() &&
1562          SII->second.back().vreg == NewVReg &&
1563          index > SII->second.back().index)
1564        // Use(s) follow the last def; it's not safe to fold the spill.
1565        SII->second.back().canFold = false;
1566      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1567        RestoreIdxes.find(MBBId);
1568      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1569        // If we are splitting live intervals, only fold if it's the first
1570        // use and there isn't another use later in the MBB.
1571        RII->second.back().canFold = false;
1572      else if (IsNew) {
1573        // Only need a reload if there isn't an earlier def / use.
1574        if (RII == RestoreIdxes.end()) {
1575          std::vector<SRInfo> Infos;
1576          Infos.push_back(SRInfo(index, NewVReg, true));
1577          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1578        } else {
1579          RII->second.push_back(SRInfo(index, NewVReg, true));
1580        }
1581        RestoreMBBs.set(MBBId);
1582      }
1583    }
1584
1585    // Update spill weight.
1586    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1587    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1588  }
1589
1590  if (NewVReg && TrySplit && AllCanFold) {
1591    // If all of its def / use can be folded, give it a low spill weight.
1592    LiveInterval &nI = getOrCreateInterval(NewVReg);
1593    nI.weight /= 10.0F;
1594  }
1595}
1596
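/// alsoFoldARestore - Return true if the restore recorded for register vr at
/// the given index in basic block Id can be folded together with the spill.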
1597bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1598                        unsigned vr, BitVector &RestoreMBBs,
1599                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1600  if (!RestoreMBBs[Id])
1601    return false;
1602  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1603  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1604    if (Restores[i].index == index &&
1605        Restores[i].vreg == vr &&
1606        Restores[i].canFold)
1607      return true;
1608  return false;
1609}
1610
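/// eraseRestoreInfo - Invalidate the restore recorded for register vr at the
/// given index in basic block Id, so that no load will be issued for it.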
1611void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1612                        unsigned vr, BitVector &RestoreMBBs,
1613                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1614  if (!RestoreMBBs[Id])
1615    return;
1616  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1617  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1618    if (Restores[i].index == index && Restores[i].vreg == vr)
1619      Restores[i].index = SlotIndex();
1620}
1621
1622/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1623/// spilled and create empty intervals for their uses.
1624void
1625LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1626                                    const TargetRegisterClass* rc,
1627                                    std::vector<LiveInterval*> &NewLIs) {
1628  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1629         re = mri_->reg_end(); ri != re; ) {
1630    MachineOperand &O = ri.getOperand();
1631    MachineInstr *MI = &*ri;
1632    ++ri;
1633    if (O.isDef()) {
1634      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1635             "Register def was not rewritten?");
1636      RemoveMachineInstrFromMaps(MI);
1637      vrm.RemoveMachineInstrFromMaps(MI);
1638      MI->eraseFromParent();
1639    } else {
1640      // This must be a use of an implicit_def so it's not part of the live
1641      // interval. Create a new empty live interval for it.
1642      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1643      unsigned NewVReg = mri_->createVirtualRegister(rc);
1644      vrm.grow();
1645      vrm.setIsImplicitlyDefined(NewVReg);
1646      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1647      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1648        MachineOperand &MO = MI->getOperand(i);
1649        if (MO.isReg() && MO.getReg() == li.reg) {
1650          MO.setReg(NewVReg);
1651          MO.setIsUndef();
1652        }
1653      }
1654    }
1655  }
1656}
1657
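/// addIntervalsForSpillsFast - A simplified spill path: assign a single stack
/// slot, then for each instruction either fold the memory operand or rewrite
/// its operands to a fresh register with an unspillable interval, recording
/// spill and restore points with the VirtRegMap.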
1658std::vector<LiveInterval*> LiveIntervals::
1659addIntervalsForSpillsFast(const LiveInterval &li,
1660                          const MachineLoopInfo *loopInfo,
1661                          VirtRegMap &vrm) {
1662  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1663
1664  std::vector<LiveInterval*> added;
1665
1666  assert(li.weight != HUGE_VALF &&
1667         "attempt to spill already spilled interval!");
1668
1669  DEBUG({
1670      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1671      li.dump();
1672      errs() << '\n';
1673    });
1674
1675  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1676
1677  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1678  while (RI != mri_->reg_end()) {
1679    MachineInstr* MI = &*RI;
1680
1681    SmallVector<unsigned, 2> Indices;
1682    bool HasUse = false;
1683    bool HasDef = false;
1684
1685    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1686      MachineOperand& mop = MI->getOperand(i);
1687      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1688
1689      HasUse |= MI->getOperand(i).isUse();
1690      HasDef |= MI->getOperand(i).isDef();
1691
1692      Indices.push_back(i);
1693    }
1694
1695    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1696                              Indices, true, slot, li.reg)) {
1697      unsigned NewVReg = mri_->createVirtualRegister(rc);
1698      vrm.grow();
1699      vrm.assignVirt2StackSlot(NewVReg, slot);
1700
1701      // Create a new live interval for this spill.
1702      LiveInterval &nI = getOrCreateInterval(NewVReg);
1703
1704      // The spill weight is now infinity as it
1705      // cannot be spilled again.
1706      nI.weight = HUGE_VALF;
1707
1708      // Rewrite register operands to use the new vreg.
1709      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1710           E = Indices.end(); I != E; ++I) {
1711        MI->getOperand(*I).setReg(NewVReg);
1712
1713        if (MI->getOperand(*I).isUse())
1714          MI->getOperand(*I).setIsKill(true);
1715      }
1716
1717      // Fill in the new live interval.
1718      SlotIndex index = getInstructionIndex(MI);
1719      if (HasUse) {
1720        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1721                     nI.getNextValue(SlotIndex(), 0, false,
1722                                     getVNInfoAllocator()));
1723        DEBUG(errs() << " +" << LR);
1724        nI.addRange(LR);
1725        vrm.addRestorePoint(NewVReg, MI);
1726      }
1727      if (HasDef) {
1728        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1729                     nI.getNextValue(SlotIndex(), 0, false,
1730                                     getVNInfoAllocator()));
1731        DEBUG(errs() << " +" << LR);
1732        nI.addRange(LR);
1733        vrm.addSpillPoint(NewVReg, true, MI);
1734      }
1735
1736      added.push_back(&nI);
1737
1738      DEBUG({
1739          errs() << "\t\t\t\tadded new interval: ";
1740          nI.dump();
1741          errs() << '\n';
1742        });
1743    }
1744
1745
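    // MI no longer references li.reg (its operands were either folded or
    // rewritten to NewVReg above), so restarting from reg_begin() makes
    // forward progress.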
1746    RI = mri_->reg_begin(li.reg);
1747  }
1748
1749  return added;
1750}
1751
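/// addIntervalsForSpills - Create new intervals for the spilled defs / uses
/// of the given interval, rematerializing defs where possible, and record
/// spill / restore points with the VirtRegMap. Returns the new intervals.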
1752std::vector<LiveInterval*> LiveIntervals::
1753addIntervalsForSpills(const LiveInterval &li,
1754                      SmallVectorImpl<LiveInterval*> &SpillIs,
1755                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1756
1757  if (EnableFastSpilling)
1758    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1759
1760  assert(li.weight != HUGE_VALF &&
1761         "attempt to spill already spilled interval!");
1762
1763  DEBUG({
1764      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1765      li.print(errs(), tri_);
1766      errs() << '\n';
1767    });
1768
1769  // Each bit specifies whether a spill is required in the corresponding MBB.
1770  BitVector SpillMBBs(mf_->getNumBlockIDs());
1771  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1772  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1773  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1774  DenseMap<unsigned,unsigned> MBBVRegsMap;
1775  std::vector<LiveInterval*> NewLIs;
1776  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1777
1778  unsigned NumValNums = li.getNumValNums();
1779  SmallVector<MachineInstr*, 4> ReMatDefs;
1780  ReMatDefs.resize(NumValNums, NULL);
1781  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1782  ReMatOrigDefs.resize(NumValNums, NULL);
1783  SmallVector<int, 4> ReMatIds;
1784  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1785  BitVector ReMatDelete(NumValNums);
1786  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1787
1788  // Spilling a split live interval. It cannot be split any further; it is
1789  // also guaranteed to be a single val# / range interval.
1790  if (vrm.getPreSplitReg(li.reg)) {
1791    vrm.setIsSplitFromReg(li.reg, 0);
1792    // Unset the split kill marker on the last use.
1793    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1794    if (KillIdx != SlotIndex()) {
1795      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1796      assert(KillMI && "Last use disappeared?");
1797      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1798      assert(KillOp != -1 && "Last use disappeared?");
1799      KillMI->getOperand(KillOp).setIsKill(false);
1800    }
1801    vrm.removeKillPoint(li.reg);
1802    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1803    Slot = vrm.getStackSlot(li.reg);
1804    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1805    MachineInstr *ReMatDefMI = DefIsReMat ?
1806      vrm.getReMaterializedMI(li.reg) : NULL;
1807    int LdSlot = 0;
1808    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1809    bool isLoad = isLoadSS ||
1810      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1811    bool IsFirstRange = true;
1812    for (LiveInterval::Ranges::const_iterator
1813           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1814      // If this is a split live interval with multiple ranges, it means there
1815      // are two-address instructions that re-defined the value. Only the
1816      // first def can be rematerialized!
1817      if (IsFirstRange) {
1818        // Note ReMatOrigDefMI has already been deleted.
1819        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1820                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1821                             false, vrm, rc, ReMatIds, loopInfo,
1822                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1823                             MBBVRegsMap, NewLIs);
1824      } else {
1825        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1826                             Slot, 0, false, false, false,
1827                             false, vrm, rc, ReMatIds, loopInfo,
1828                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1829                             MBBVRegsMap, NewLIs);
1830      }
1831      IsFirstRange = false;
1832    }
1833
1834    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1835    return NewLIs;
1836  }
1837
1838  bool TrySplit = !intervalIsInOneMBB(li);
1839  if (TrySplit)
1840    ++numSplits;
1841  bool NeedStackSlot = false;
1842  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1843       i != e; ++i) {
1844    const VNInfo *VNI = *i;
1845    unsigned VN = VNI->id;
1846    if (VNI->isUnused())
1847      continue; // Dead val#.
1848    // Is the def for the val# rematerializable?
1849    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1850      ? getInstructionFromIndex(VNI->def) : 0;
1851    bool dummy;
1852    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1853      // Remember how to remat the def of this val#.
1854      ReMatOrigDefs[VN] = ReMatDefMI;
1855      // Original def may be modified so we have to make a copy here.
1856      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1857      CloneMIs.push_back(Clone);
1858      ReMatDefs[VN] = Clone;
1859
1860      bool CanDelete = true;
1861      if (VNI->hasPHIKill()) {
1862        // A kill is a phi node, so not all of its uses can be
1863        // rematerialized. It must not be deleted.
1864        CanDelete = false;
1865        // Need a stack slot if there is any live range where uses cannot be
1866        // rematerialized.
1867        NeedStackSlot = true;
1868      }
1869      if (CanDelete)
1870        ReMatDelete.set(VN);
1871    } else {
1872      // Need a stack slot if there is any live range where uses cannot be
1873      // rematerialized.
1874      NeedStackSlot = true;
1875    }
1876  }
1877
1878  // One stack slot per live interval.
1879  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1880    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1881      Slot = vrm.assignVirt2StackSlot(li.reg);
1882
1883    // This case only occurs when the prealloc splitter has already assigned
1884    // a stack slot to this vreg.
1885    else
1886      Slot = vrm.getStackSlot(li.reg);
1887  }
1888
1889  // Create new intervals and rewrite defs and uses.
1890  for (LiveInterval::Ranges::const_iterator
1891         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1892    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1893    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1894    bool DefIsReMat = ReMatDefMI != NULL;
1895    bool CanDelete = ReMatDelete[I->valno->id];
1896    int LdSlot = 0;
1897    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1898    bool isLoad = isLoadSS ||
1899      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1900    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1901                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1902                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1903                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1904                               MBBVRegsMap, NewLIs);
1905  }
1906
1907  // Insert spills / restores only if we are splitting; otherwise we are done.
1908  if (!TrySplit) {
1909    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1910    return NewLIs;
1911  }
1912
1913  SmallPtrSet<LiveInterval*, 4> AddedKill;
1914  SmallVector<unsigned, 2> Ops;
1915  if (NeedStackSlot) {
1916    int Id = SpillMBBs.find_first();
1917    while (Id != -1) {
1918      std::vector<SRInfo> &spills = SpillIdxes[Id];
1919      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1920        SlotIndex index = spills[i].index;
1921        unsigned VReg = spills[i].vreg;
1922        LiveInterval &nI = getOrCreateInterval(VReg);
1923        bool isReMat = vrm.isReMaterialized(VReg);
1924        MachineInstr *MI = getInstructionFromIndex(index);
1925        bool CanFold = false;
1926        bool FoundUse = false;
1927        Ops.clear();
1928        if (spills[i].canFold) {
1929          CanFold = true;
1930          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1931            MachineOperand &MO = MI->getOperand(j);
1932            if (!MO.isReg() || MO.getReg() != VReg)
1933              continue;
1934
1935            Ops.push_back(j);
1936            if (MO.isDef())
1937              continue;
1938            if (isReMat ||
1939                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1940                                                RestoreMBBs, RestoreIdxes))) {
1941              // MI has two-address uses of the same register. If the use
1942              // isn't the first and only use in the BB, then we can't fold
1943              // it. FIXME: Move this to rewriteInstructionsForSpills.
1944              CanFold = false;
1945              break;
1946            }
1947            FoundUse = true;
1948          }
1949        }
1950        // Fold the store into the def if possible.
1951        bool Folded = false;
1952        if (CanFold && !Ops.empty()) {
1953          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1954            Folded = true;
1955            if (FoundUse) {
1956              // Also folded uses, do not issue a load.
1957              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1958              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1959            }
1960            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1961          }
1962        }
1963
1964        // Otherwise tell the spiller to issue a spill.
1965        if (!Folded) {
1966          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1967          bool isKill = LR->end == index.getStoreIndex();
1968          if (!MI->registerDefIsDead(nI.reg))
1969            // No need to spill a dead def.
1970            vrm.addSpillPoint(VReg, isKill, MI);
1971          if (isKill)
1972            AddedKill.insert(&nI);
1973        }
1974      }
1975      Id = SpillMBBs.find_next(Id);
1976    }
1977  }
1978
1979  int Id = RestoreMBBs.find_first();
1980  while (Id != -1) {
1981    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1982    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1983      SlotIndex index = restores[i].index;
1984      if (index == SlotIndex())
1985        continue;
1986      unsigned VReg = restores[i].vreg;
1987      LiveInterval &nI = getOrCreateInterval(VReg);
1988      bool isReMat = vrm.isReMaterialized(VReg);
1989      MachineInstr *MI = getInstructionFromIndex(index);
1990      bool CanFold = false;
1991      Ops.clear();
1992      if (restores[i].canFold) {
1993        CanFold = true;
1994        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1995          MachineOperand &MO = MI->getOperand(j);
1996          if (!MO.isReg() || MO.getReg() != VReg)
1997            continue;
1998
1999          if (MO.isDef()) {
2000            // If this restore were to be folded, it would have been folded
2001            // already.
2002            CanFold = false;
2003            break;
2004          }
2005          Ops.push_back(j);
2006        }
2007      }
2008
2009      // Fold the load into the use if possible.
2010      bool Folded = false;
2011      if (CanFold && !Ops.empty()) {
2012        if (!isReMat)
2013          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2014        else {
2015          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2016          int LdSlot = 0;
2017          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2018          // If the rematerializable def is a load, also try to fold it.
2019          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2020            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2021                                          Ops, isLoadSS, LdSlot, VReg);
2022          if (!Folded) {
2023            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2024            if (ImpUse) {
2025              // Re-matting an instruction with virtual register use. Add the
2026              // register as an implicit use on the use MI and update the register
2027              // interval's spill weight to HUGE_VALF to prevent it from being
2028              // spilled.
2029              LiveInterval &ImpLi = getInterval(ImpUse);
2030              ImpLi.weight = HUGE_VALF;
2031              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2032            }
2033          }
2034        }
2035      }
2036      // If folding is not possible / failed, then tell the spiller to issue a
2037      // load / rematerialization for us.
2038      if (Folded)
2039        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2040      else
2041        vrm.addRestorePoint(VReg, MI);
2042    }
2043    Id = RestoreMBBs.find_next(Id);
2044  }
2045
2046  // Finalize intervals: add kills, finalize spill weights, and filter out
2047  // dead intervals.
2048  std::vector<LiveInterval*> RetNewLIs;
2049  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2050    LiveInterval *LI = NewLIs[i];
2051    if (!LI->empty()) {
2052      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2053      if (!AddedKill.count(LI)) {
2054        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2055        SlotIndex LastUseIdx = LR->end.getBaseIndex();
2056        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2057        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2058        assert(UseIdx != -1);
2059        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2060          LastUse->getOperand(UseIdx).setIsKill();
2061          vrm.addKillPoint(LI->reg, LastUseIdx);
2062        }
2063      }
2064      RetNewLIs.push_back(LI);
2065    }
2066  }
2067
2068  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2069  return RetNewLIs;
2070}
2071
2072/// hasAllocatableSuperReg - Return true if the specified physical register
2073/// has any super register that is allocatable and has a live interval.
2074bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2075  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2076    if (allocatableRegs_[*AS] && hasInterval(*AS))
2077      return true;
2078  return false;
2079}
2080
2081/// getRepresentativeReg - Find the largest super register of the specified
2082/// physical register.
2083unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2084  // Find the topmost super-register that has a live interval.
2085  unsigned BestReg = Reg;
2086  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2087    unsigned SuperReg = *AS;
2088    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2089      BestReg = SuperReg;
2090      break;
2091    }
2092  }
2093  return BestReg;
2094}
2095
2096/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2097/// specified interval that conflicts with the specified physical register.
2098unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2099                                                   unsigned PhysReg) const {
2100  unsigned NumConflicts = 0;
2101  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2102  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2103         E = mri_->reg_end(); I != E; ++I) {
2104    MachineOperand &O = I.getOperand();
2105    MachineInstr *MI = O.getParent();
2106    SlotIndex Index = getInstructionIndex(MI);
2107    if (pli.liveAt(Index))
2108      ++NumConflicts;
2109  }
2110  return NumConflicts;
2111}
2112
2113/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2114/// around all defs and uses of the specified interval. Return true if it
2115/// was able to cut its interval.
2116bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2117                                            unsigned PhysReg, VirtRegMap &vrm) {
2118  unsigned SpillReg = getRepresentativeReg(PhysReg);
2119
2120  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2121    // If there are registers which alias PhysReg but are not sub-registers
2122    // of the chosen representative super register, assert, since we
2123    // cannot handle them yet.
2124    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2125           tri_->isSuperRegister(*AS, SpillReg));
2126
2127  bool Cut = false;
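  // Collect the physical registers to spill: SpillReg itself if it has a
  // live interval, otherwise its sub-registers that do.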
2128  SmallVector<unsigned, 4> PRegs;
2129  if (hasInterval(SpillReg))
2130    PRegs.push_back(SpillReg);
2131  else {
2132    SmallSet<unsigned, 4> Added;
2133    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2134      if (Added.insert(*AS) && hasInterval(*AS)) {
2135        PRegs.push_back(*AS);
2136        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2137          Added.insert(*ASS);
2138      }
2139  }
2140
2141  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2142  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2143         E = mri_->reg_end(); I != E; ++I) {
2144    MachineOperand &O = I.getOperand();
2145    MachineInstr *MI = O.getParent();
2146    if (SeenMIs.count(MI))
2147      continue;
2148    SeenMIs.insert(MI);
2149    SlotIndex Index = getInstructionIndex(MI);
2150    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2151      unsigned PReg = PRegs[i];
2152      LiveInterval &pli = getInterval(PReg);
2153      if (!pli.liveAt(Index))
2154        continue;
2155      vrm.addEmergencySpill(PReg, MI);
2156      SlotIndex StartIdx = Index.getLoadIndex();
2157      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2158      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2159        pli.removeRange(StartIdx, EndIdx);
2160        Cut = true;
2161      } else {
2162        std::string msg;
2163        raw_string_ostream Msg(msg);
2164        Msg << "Ran out of registers during register allocation!";
2165        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2166          Msg << "\nPlease check your inline asm statement for invalid "
2167              << "constraints:\n";
2168          MI->print(Msg, tm_);
2169        }
2170        llvm_report_error(Msg.str());
2171      }
2172      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2173        if (!hasInterval(*AS))
2174          continue;
2175        LiveInterval &spli = getInterval(*AS);
2176        if (spli.liveAt(Index))
2177          spli.removeRange(Index.getLoadIndex(),
2178                           Index.getNextIndex().getBaseIndex());
2179      }
2180    }
2181  }
2182  return Cut;
2183}
2184
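/// addLiveRangeToEndOfBlock - Extend reg with a live range starting at
/// startInst's def and running to the end of its basic block, marking the
/// value as having a PHI kill at the terminator gap.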
2185LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2186                                                  MachineInstr* startInst) {
2187  LiveInterval& Interval = getOrCreateInterval(reg);
2188  VNInfo* VN = Interval.getNextValue(
2189    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2190    startInst, true, getVNInfoAllocator());
2191  VN->setHasPHIKill(true);
2192  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2193  LiveRange LR(
2194     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2195     getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2196  Interval.addRange(LR);
2197
2198  return LR;
2199}
2200
2201