LiveIntervalAnalysis.cpp revision 9836a26da3acc15deec878cd6ba21a6b5f2e753b
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/Passes.h"
31#include "llvm/CodeGen/ProcessImplicitDefs.h"
32#include "llvm/Target/TargetRegisterInfo.h"
33#include "llvm/Target/TargetInstrInfo.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include "llvm/ADT/DepthFirstIterator.h"
41#include "llvm/ADT/SmallSet.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/ADT/STLExtras.h"
44#include <algorithm>
45#include <limits>
46#include <cmath>
47using namespace llvm;
48
// Hidden options for help debugging.

// When set, disables the rematerialization optimization entirely (see
// isReMaterializable below).
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Enables a faster spilling path; presumably consumed by the spilling code
// later in this file — confirm against the spiller implementation.
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

// Pass statistics, reported when -stats is given.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// The address of ID uniquely identifies this pass to the pass manager.
char LiveIntervals::ID = 0;
// Register the pass under the -liveintervals command-line name.
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
62
/// getAnalysisUsage - Declare the analyses this pass requires and the ones
/// it preserves, so the pass manager can schedule it correctly.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // Computing live intervals never changes the CFG.
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  // PHI elimination is only required/preserved in the normal pipeline;
  // presumably the strong PHI-elimination path handles PHIs itself —
  // confirm against the StrongPHIElim implementation.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: passes that use LiveIntervals also keep SlotIndexes alive.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
84
85void LiveIntervals::releaseMemory() {
86  // Free the live intervals themselves.
87  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88       E = r2iMap_.end(); I != E; ++I)
89    delete I->second;
90
91  r2iMap_.clear();
92
93  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
94  VNInfoAllocator.Reset();
95  while (!CloneMIs.empty()) {
96    MachineInstr *MI = CloneMIs.back();
97    CloneMIs.pop_back();
98    mf_->DeleteMachineInstr(MI);
99  }
100}
101
102/// runOnMachineFunction - Register allocate the whole function
103///
104bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
105  mf_ = &fn;
106  mri_ = &mf_->getRegInfo();
107  tm_ = &fn.getTarget();
108  tri_ = tm_->getRegisterInfo();
109  tii_ = tm_->getInstrInfo();
110  aa_ = &getAnalysis<AliasAnalysis>();
111  lv_ = &getAnalysis<LiveVariables>();
112  indexes_ = &getAnalysis<SlotIndexes>();
113  allocatableRegs_ = tri_->getAllocatableSet(fn);
114
115  computeIntervals();
116
117  numIntervals += getNumIntervals();
118
119  DEBUG(dump());
120  return true;
121}
122
123/// print - Implement the dump method.
124void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125  OS << "********** INTERVALS **********\n";
126  for (const_iterator I = begin(), E = end(); I != E; ++I) {
127    I->second->print(OS, tri_);
128    OS << "\n";
129  }
130
131  printInstrs(OS);
132}
133
134void LiveIntervals::printInstrs(raw_ostream &OS) const {
135  OS << "********** MACHINEINSTRS **********\n";
136
137  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138       mbbi != mbbe; ++mbbi) {
139    OS << "BB#" << mbbi->getNumber()
140       << ":\t\t# derived from " << mbbi->getName() << "\n";
141    for (MachineBasicBlock::iterator mii = mbbi->begin(),
142           mie = mbbi->end(); mii != mie; ++mii) {
143      OS << getInstructionIndex(mii) << '\t' << *mii;
144    }
145  }
146}
147
/// dumpInstrs - Convenience wrapper that prints the indexed instruction
/// listing (printInstrs) to errs(); intended for use from a debugger.
void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
151
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by each live range of li.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Iterate whole instruction indices from the range start up to (and
    // including) the base index of the last slot the range covers.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index = index.getNextIndex();
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      // A copy into or out of li.reg itself does not count as a conflict.
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      // Check every register operand of MI against 'reg'.
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          // Map virtual operands through the current assignment; a virtual
          // register without a physreg assignment cannot conflict.
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Conflict if the operand's physreg overlaps (aliases) 'reg'.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
192
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Walk every instruction slot covered by each live range of li.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = index.getNextIndex();
      }
      if (index == end) break;

      // Copies the coalescer has already joined are going away; ignore them.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        // Uses only count when the caller asked for them.
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        // Only direct physical-register references matter here.
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        // Conflict if the operand references a sub-register of Reg.
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
233
234#ifndef NDEBUG
235static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
236  if (TargetRegisterInfo::isPhysicalRegister(reg))
237    errs() << tri_->getName(reg);
238  else
239    errs() << "%reg" << reg;
240}
241#endif
242
/// handleVirtualRegisterDef - Update 'interval' (the live interval of a
/// virtual register) for the def in operand MOIdx of instruction mi, which
/// occupies slot MIIdx inside basic block mbb.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Record the defining instruction on the value number when it is some
    // flavor of copy, so later phases can find the copy source.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // The defining instruction is also the kill: a dead def.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
                       0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      // As above: remember copy-like defining instructions on the val#.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
463
/// handlePhysicalRegisterDef - Compute the live range created by the def of
/// physical register 'interval.reg' at slot MIIdx inside MBB.  CopyMI is the
/// defining instruction when it is some kind of copy, or null otherwise.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // Skip slots whose instruction has been deleted.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that defines
          // it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  // An earlyclobber redefining an existing range is flagged on its val#.
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
550
551void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
552                                      MachineBasicBlock::iterator MI,
553                                      SlotIndex MIIdx,
554                                      MachineOperand& MO,
555                                      unsigned MOIdx) {
556  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
557    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
558                             getOrCreateInterval(MO.getReg()));
559  else if (allocatableRegs_[MO.getReg()]) {
560    MachineInstr *CopyMI = NULL;
561    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
562    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
563        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
564        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
565        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
566      CopyMI = MI;
567    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
568                              getOrCreateInterval(MO.getReg()), CopyMI);
569    // Def of a register also defines its sub-registers.
570    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
571      // If MI also modifies the sub-register explicitly, avoid processing it
572      // more than once. Do not pass in TRI here so it checks for exact match.
573      if (!MI->modifiesRegister(*AS))
574        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
575                                  getOrCreateInterval(*AS), 0);
576  }
577}
578
/// handleLiveInRegister - Build the live range for a register that is
/// live-in to MBB, starting at the block start index MIIdx.  If isAlias is
/// set, the register is live-in only as an alias of a real live-in and is
/// treated as dead when no reference to it is found.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  // Skip over deleted leading slots so baseIndex tracks mi.
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  // Scan forward for the first instruction that kills or redefines the
  // register; mi and baseIndex advance in lock-step.
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // An alias with no explicit reference is dead at the block start.
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      // Otherwise the value is live through the entire block.
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  // Live-in values have no defining instruction; model them as PHI defs at
  // the block start.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
642
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers that only appear as <undef> uses; they get empty intervals
  // created at the end.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    // Walk instructions and their slot indices in lock-step.
    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      // NOTE(review): operands are visited in reverse; presumably so
      // implicit defs (appended after explicit operands) are seen first —
      // confirm this ordering is load-bearing before changing it.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
706
707LiveInterval* LiveIntervals::createInterval(unsigned reg) {
708  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
709  return new LiveInterval(reg, Weight);
710}
711
712/// dupInterval - Duplicate a live interval. The caller is responsible for
713/// managing the allocated memory.
714LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
715  LiveInterval *NewLI = createInterval(li->reg);
716  NewLI->Copy(*li, mri_, getVNInfoAllocator());
717  return NewLI;
718}
719
720/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
721/// copy field and returns the source register that defines it.
722unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
723  if (!VNI->getCopy())
724    return 0;
725
726  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
727    // If it's extracting out of a physical register, return the sub-register.
728    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
729    if (TargetRegisterInfo::isPhysicalRegister(Reg))
730      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
731    return Reg;
732  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
733             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
734    return VNI->getCopy()->getOperand(2).getReg();
735
736  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
737  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
738    return SrcReg;
739  llvm_unreachable("Unrecognized copy instruction!");
740  return 0;
741}
742
743//===----------------------------------------------------------------------===//
744// Register allocator hooks.
745//
746
747/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
748/// allow one) virtual register operand, then its uses are implicitly using
749/// the register. Returns the virtual register.
750unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
751                                            MachineInstr *MI) const {
752  unsigned RegOp = 0;
753  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
754    MachineOperand &MO = MI->getOperand(i);
755    if (!MO.isReg() || !MO.isUse())
756      continue;
757    unsigned Reg = MO.getReg();
758    if (Reg == 0 || Reg == li.reg)
759      continue;
760
761    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
762        !allocatableRegs_[Reg])
763      continue;
764    // FIXME: For now, only remat MI with at most one register operand.
765    assert(!RegOp &&
766           "Can't rematerialize instruction with multiple register operand!");
767    RegOp = MO.getReg();
768#ifndef NDEBUG
769    break;
770#endif
771  }
772  return RegOp;
773}
774
775/// isValNoAvailableAt - Return true if the val# of the specified interval
776/// which reaches the given instruction also reaches the specified use index.
777bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
778                                       SlotIndex UseIdx) const {
779  SlotIndex Index = getInstructionIndex(MI);
780  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
781  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
782  return UI != li.end() && UI->valno == ValNo;
783}
784
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  // Rematerialization can be disabled from the command line for debugging.
  if (DisableReMat)
    return false;

  // The target decides whether MI itself is safe to re-execute.
  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every use of li reached by ValNo must also be reachable by the value
    // of ImpLi live at MI, or the rematerialized copy would read a
    // different implicit-operand value.
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      // NOTE(review): unchecked dereference — assumes every use of li.reg
      // falls inside li; confirm this invariant.
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  // Note: isLoad is never written in this function as shown; callers must
  // initialize it themselves.
  return true;
}
821
822/// isReMaterializable - Returns true if the definition MI of the specified
823/// val# of the specified interval is re-materializable.
824bool LiveIntervals::isReMaterializable(const LiveInterval &li,
825                                       const VNInfo *ValNo, MachineInstr *MI) {
826  SmallVector<LiveInterval*, 4> Dummy1;
827  bool Dummy2;
828  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
829}
830
831/// isReMaterializable - Returns true if every definition of MI of every
832/// val# of the specified interval is re-materializable.
833bool LiveIntervals::isReMaterializable(const LiveInterval &li,
834                                       SmallVectorImpl<LiveInterval*> &SpillIs,
835                                       bool &isLoad) {
836  isLoad = false;
837  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
838       i != e; ++i) {
839    const VNInfo *VNI = *i;
840    if (VNI->isUnused())
841      continue; // Dead val#.
842    // Is the def for the val# rematerializable?
843    if (!VNI->isDefAccurate())
844      return false;
845    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
846    bool DefIsLoad = false;
847    if (!ReMatDefMI ||
848        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
849      return false;
850    isLoad |= DefIsLoad;
851  }
852  return true;
853}
854
855/// FilterFoldedOps - Filter out two-address use operands. Return
856/// true if it finds any issue with the operands that ought to prevent
857/// folding.
858static bool FilterFoldedOps(MachineInstr *MI,
859                            SmallVector<unsigned, 2> &Ops,
860                            unsigned &MRInfo,
861                            SmallVector<unsigned, 2> &FoldOps) {
862  MRInfo = 0;
863  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
864    unsigned OpIdx = Ops[i];
865    MachineOperand &MO = MI->getOperand(OpIdx);
866    // FIXME: fold subreg use.
867    if (MO.getSubReg())
868      return true;
869    if (MO.isDef())
870      MRInfo |= (unsigned)VirtRegMap::isMod;
871    else {
872      // Filter out two-address use operand(s).
873      if (MI->isRegTiedToDefOperand(OpIdx)) {
874        MRInfo = VirtRegMap::isModRef;
875        continue;
876      }
877      MRInfo |= (unsigned)VirtRegMap::isRef;
878    }
879    FoldOps.push_back(OpIdx);
880  }
881  return false;
882}
883

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Fold either from the stack slot (spill / restore) or from the remat def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Migrate any pending spill / restore / emergency-spill points recorded
    // against MI over to the folded instruction before MI is destroyed.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    // Splice the folded instruction in place of MI, and hand it back to the
    // caller through the by-reference MI parameter.
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
936
937/// canFoldMemoryOperand - Returns true if the specified load / store
938/// folding is possible.
939bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
940                                         SmallVector<unsigned, 2> &Ops,
941                                         bool ReMat) const {
942  // Filter the list of operand indexes that are to be folded. Abort if
943  // any operand will prevent folding.
944  unsigned MRInfo = 0;
945  SmallVector<unsigned, 2> FoldOps;
946  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
947    return false;
948
949  // It's only legal to remat for a use, not a def.
950  if (ReMat && (MRInfo & VirtRegMap::isMod))
951    return false;
952
953  return tii_->canFoldMemoryOperand(MI, FoldOps);
954}
955
956bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
957  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
958
959  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
960
961  if (mbb == 0)
962    return false;
963
964  for (++itr; itr != li.ranges.end(); ++itr) {
965    MachineBasicBlock *mbb2 =
966      indexes_->getMBBCoveringRange(itr->start, itr->end);
967
968    if (mbb2 != mbb)
969      return false;
970  }
971
972  return true;
973}
974
975/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
976/// interval on to-be re-materialized operands of MI) with new register.
977void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
978                                       MachineInstr *MI, unsigned NewVReg,
979                                       VirtRegMap &vrm) {
980  // There is an implicit use. That means one of the other operand is
981  // being remat'ed and the remat'ed instruction has li.reg as an
982  // use operand. Make sure we rewrite that as well.
983  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
984    MachineOperand &MO = MI->getOperand(i);
985    if (!MO.isReg())
986      continue;
987    unsigned Reg = MO.getReg();
988    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
989      continue;
990    if (!vrm.isReMaterialized(Reg))
991      continue;
992    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
993    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
994    if (UseMO)
995      UseMO->setReg(NewVReg);
996  }
997}
998
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Rewrites every li.reg operand of MI to a (possibly new) NewVReg, deleting,
/// folding or rematerializing as allowed by the flags.  HasDef / HasUse report
/// what was rewritten; returns true if a later fold is still possible (only
/// meaningful when TrySplit is set).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    // RegI remembers the original register so later operands referencing the
    // same register can be matched after mop is rewritten below.
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        // Undef operands are rewritten but do not contribute use/def-ness.
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If folding erased the instruction entirely, stop; otherwise
          // restart the operand scan on the (replaced) instruction.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
1214bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1215                                   const VNInfo *VNI,
1216                                   MachineBasicBlock *MBB,
1217                                   SlotIndex Idx) const {
1218  SlotIndex End = getMBBEndIdx(MBB);
1219  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1220    if (VNI->kills[j].isPHI())
1221      continue;
1222
1223    SlotIndex KillIdx = VNI->kills[j];
1224    if (KillIdx > Idx && KillIdx < End)
1225      return true;
1226  }
1227  return false;
1228}
1229
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;   // Slot index of MI.
    MachineInstr *MI;  // Instruction containing a def/use of the spilled reg.
    bool HasUse;       // The recorded operand is a use.
    bool HasDef;       // The recorded operand is a def.
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  // Orders RewriteInfo entries by ascending instruction index.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1248
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside the
/// live range *I, creating spill intervals and recording per-MBB spill
/// (SpillIdxes / SpillMBBs) and restore (RestoreIdxes / RestoreMBBs) points
/// for addIntervalsForSpills to materialize later.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Normalize the range bounds to whole-instruction (base) indexes.
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before inspecting; the use list may be mutated later.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Merge them into one record for this MI.
    // NOTE(review): NumUses is accumulated but never read below — appears to
    // be a leftover; confirm before removing.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already created for this MBB, if any.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1463
1464bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1465                        unsigned vr, BitVector &RestoreMBBs,
1466                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1467  if (!RestoreMBBs[Id])
1468    return false;
1469  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1470  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1471    if (Restores[i].index == index &&
1472        Restores[i].vreg == vr &&
1473        Restores[i].canFold)
1474      return true;
1475  return false;
1476}
1477
1478void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1479                        unsigned vr, BitVector &RestoreMBBs,
1480                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1481  if (!RestoreMBBs[Id])
1482    return;
1483  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1484  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1485    if (Restores[i].index == index && Restores[i].vreg)
1486      Restores[i].index = SlotIndex();
1487}
1488
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance the iterator first: erasing MI or rewriting its operand below
    // removes the current entry from li.reg's use list.
    ++ri;
    if (O.isDef()) {
      // Any remaining def of li.reg must be the IMPLICIT_DEF itself; real
      // defs were rewritten earlier in the spilling process.
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every li.reg operand of MI to the new vreg and mark it undef
      // so no reaching def is expected.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1524
/// addIntervalsForSpillsFast - Simple spiller: for every instruction touching
/// li.reg, either fold the stack slot into the instruction or rewrite the
/// operands to a fresh vreg with a tiny live interval around the instruction.
/// Returns the newly created intervals.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Collect every operand of MI that refers to li.reg.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    // Prefer folding the stack slot directly into MI; otherwise reload /
    // spill through a fresh virtual register.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in  the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }


    // Both paths above remove every li.reg operand of MI from li.reg's use
    // list (fold erases/replaces MI, rewrite renames the operands), so
    // restarting from the head of the use list makes progress each iteration.
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
1618
/// addIntervalsForSpills - Spill (and possibly split) li, creating new live
/// intervals for the spilled value. Tries rematerialization of each val#'s
/// def where possible, folds spill/restore code into the using instructions
/// where the target allows it, and otherwise records spill/restore points in
/// vrm for the spiller to materialize. Returns the surviving new intervals.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization bookkeeping, indexed by VNInfo::id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  // Only consider splitting when the interval spans multiple blocks.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // First pass over val#s: decide which defs are rematerializable, clone the
  // remat defs (the originals may be modified later), and determine whether
  // any value still requires a stack slot.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  // Process the spill points recorded by rewriteInstructionsForSpills: try to
  // fold each store into its defining instruction, otherwise record a spill
  // point for the spiller.
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Process the restore points: try to fold each reload (or remat-able load)
  // into the using instruction, otherwise record a restore point.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the register
              // interval's spill weight to HUGE_VALF to prevent it from being
              // spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // The interval's last range ends at its last use; mark that use as a
        // kill unless it is tied to a def (two-address constraint).
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
1938
1939/// hasAllocatableSuperReg - Return true if the specified physical register has
1940/// any super register that's allocatable.
1941bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1942  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1943    if (allocatableRegs_[*AS] && hasInterval(*AS))
1944      return true;
1945  return false;
1946}
1947
1948/// getRepresentativeReg - Find the largest super register of the specified
1949/// physical register.
1950unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1951  // Find the largest super-register that is allocatable.
1952  unsigned BestReg = Reg;
1953  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1954    unsigned SuperReg = *AS;
1955    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1956      BestReg = SuperReg;
1957      break;
1958    }
1959  }
1960  return BestReg;
1961}
1962
1963/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1964/// specified interval that conflicts with the specified physical register.
1965unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1966                                                   unsigned PhysReg) const {
1967  unsigned NumConflicts = 0;
1968  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1969  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1970         E = mri_->reg_end(); I != E; ++I) {
1971    MachineOperand &O = I.getOperand();
1972    MachineInstr *MI = O.getParent();
1973    SlotIndex Index = getInstructionIndex(MI);
1974    if (pli.liveAt(Index))
1975      ++NumConflicts;
1976  }
1977  return NumConflicts;
1978}
1979
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  // Work on the largest super-register so the whole aliasing family is
  // covered by one spill.
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  // Collect the physical registers whose intervals must be cut: either
  // SpillReg itself, or (if it has no interval) its maximal sub-registers
  // that do. Added prevents visiting a sub-register tree twice.
  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // For every instruction touching li.reg (visited once each), emergency-
  // spill any of PRegs that is live there and carve that slot out of its
  // interval. Failing to carve means allocation cannot proceed.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The slot spans multiple live ranges of PReg; we cannot cut it out,
        // so register allocation has genuinely failed.
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      // Also carve the slot out of any live sub-register intervals of PReg.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
2051
2052LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2053                                                  MachineInstr* startInst) {
2054  LiveInterval& Interval = getOrCreateInterval(reg);
2055  VNInfo* VN = Interval.getNextValue(
2056    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2057    startInst, true, getVNInfoAllocator());
2058  VN->setHasPHIKill(true);
2059  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2060  LiveRange LR(
2061     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2062     getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2063  Interval.addRange(LR);
2064
2065  return LR;
2066}
2067
2068