LiveIntervalAnalysis.cpp revision d6a6b3b7563a3f54ba48d465fc03ee10bbccb7a8
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

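/// conflictsWithPhysReg - Return true if assigning physical register reg to
/// li would conflict: that is, if some instruction in li's range has an
/// operand whose physical register (after mapping virtual registers through
/// vrm) overlaps reg. Copies between li.reg and reg are permitted, and
/// intervals with more than one range are conservatively rejected.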
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

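/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by the MOIdx'th operand of instruction mi at MIIdx.
/// The first def of a vreg builds its whole interval from the live variable
/// information; later defs come from two-address rewriting or phi
/// elimination and only add or split value numbers.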
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

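/// handlePhysicalRegisterDef - Extend the live interval of the physical
/// register defined by mi at MIIdx. The range always ends in the defining
/// block: at the killing use, at a redefinition, or at the def slot itself
/// when the register is dead.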
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used.
  // Another possible case is that the implicit use of the physical
  // register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

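/// handleRegisterDef - Dispatch a register def to the virtual or physical
/// register handler. A def of an allocatable physical register also defines
/// each of its sub-registers.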
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

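/// handleLiveInRegister - Build the part of the live interval for a register
/// that is live into MBB, from the block start to its first kill or
/// redefinition. A live-in with no def or use is live through the block,
/// unless it is merely an alias of a real live-in (isAlias), in which case
/// it is dead.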
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills. If the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

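/// createInterval - Allocate a new live interval for the given register.
/// Physical registers get an infinite spill weight so the allocator never
/// tries to spill them.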
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from a
/// stack slot / to a register, or any rematerialized load, into the ith
/// operand of the specified MI. If it is successful, MI is updated with the
/// newly created MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload or spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

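/// intervalIsInOneMBB - Return true if every range of the given interval is
/// covered by the same machine basic block.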
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
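  // A successful fold below may replace MI with a new instruction; when that
  // happens we jump back here and rescan the new operand list.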
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways; rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
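
/// anyKillInMBBAfterIdx - Return true if the given value number has a kill
/// inside MBB that comes after Idx, ignoring PHI kills.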
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB,
                                   SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the previous one.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

1396    // Update weight of spill interval.
1397    LiveInterval &nI = getOrCreateInterval(NewVReg);
1398    if (!TrySplit) {
1399      // The spill weight is now infinity as it cannot be spilled again.
1400      nI.markNotSpillable();
1401      continue;
1402    }
1403
1404    // Keep track of the last def and first use in each MBB.
1405    if (HasDef) {
1406      if (MI != ReMatOrigDefMI || !CanDelete) {
1407        bool HasKill = false;
1408        if (!HasUse)
1409          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1410        else {
1411          // If this is a two-address code, then this index starts a new VNInfo.
1412          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1413          if (VNI)
1414            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1415        }
1416        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1417          SpillIdxes.find(MBBId);
1418        if (!HasKill) {
1419          if (SII == SpillIdxes.end()) {
1420            std::vector<SRInfo> S;
1421            S.push_back(SRInfo(index, NewVReg, true));
1422            SpillIdxes.insert(std::make_pair(MBBId, S));
1423          } else if (SII->second.back().vreg != NewVReg) {
1424            SII->second.push_back(SRInfo(index, NewVReg, true));
1425          } else if (index > SII->second.back().index) {
1426            // If there is an earlier def and this is a two-address
1427            // instruction, then it's not possible to fold the store (which
1428            // would also fold the load).
1429            SRInfo &Info = SII->second.back();
1430            Info.index = index;
1431            Info.canFold = !HasUse;
1432          }
1433          SpillMBBs.set(MBBId);
1434        } else if (SII != SpillIdxes.end() &&
1435                   SII->second.back().vreg == NewVReg &&
1436                   index > SII->second.back().index) {
1437          // There is an earlier def that's not killed (must be two-address).
1438          // The spill is no longer needed.
1439          SII->second.pop_back();
1440          if (SII->second.empty()) {
1441            SpillIdxes.erase(MBBId);
1442            SpillMBBs.reset(MBBId);
1443          }
1444        }
1445      }
1446    }
1447
1448    if (HasUse) {
1449      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1450        SpillIdxes.find(MBBId);
1451      if (SII != SpillIdxes.end() &&
1452          SII->second.back().vreg == NewVReg &&
1453          index > SII->second.back().index)
1454        // There are use(s) after the last def; it's not safe to fold the spill.
1455        SII->second.back().canFold = false;
1456      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1457        RestoreIdxes.find(MBBId);
1458      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1459        // If we are splitting live intervals, only fold if it's the first
1460        // use and there isn't another use later in the MBB.
1461        RII->second.back().canFold = false;
1462      else if (IsNew) {
1463        // Only need a reload if there isn't an earlier def / use.
1464        if (RII == RestoreIdxes.end()) {
1465          std::vector<SRInfo> Infos;
1466          Infos.push_back(SRInfo(index, NewVReg, true));
1467          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1468        } else {
1469          RII->second.push_back(SRInfo(index, NewVReg, true));
1470        }
1471        RestoreMBBs.set(MBBId);
1472      }
1473    }
1474
1475    // Update spill weight.
1476    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1477    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1478  }
1479
1480  if (NewVReg && TrySplit && AllCanFold) {
1481    // If all of its def / use can be folded, give it a low spill weight.
1482    LiveInterval &nI = getOrCreateInterval(NewVReg);
1483    nI.weight /= 10.0F;
1484  }
1485}
1486
1487bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1488                        unsigned vr, BitVector &RestoreMBBs,
1489                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1490  if (!RestoreMBBs[Id])
1491    return false;
1492  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1493  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1494    if (Restores[i].index == index &&
1495        Restores[i].vreg == vr &&
1496        Restores[i].canFold)
1497      return true;
1498  return false;
1499}
1500
1501void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1502                        unsigned vr, BitVector &RestoreMBBs,
1503                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1504  if (!RestoreMBBs[Id])
1505    return;
1506  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
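  // Tombstone matching entries by resetting their index to the default
  // (invalid) SlotIndex; the restore-insertion loop skips these.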
1507  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1508    if (Restores[i].index == index && Restores[i].vreg == vr)
1509      Restores[i].index = SlotIndex();
1510}
1511
1512/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1513/// spilled and create empty intervals for their uses.
1514void
1515LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1516                                    const TargetRegisterClass* rc,
1517                                    std::vector<LiveInterval*> &NewLIs) {
1518  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1519         re = mri_->reg_end(); ri != re; ) {
1520    MachineOperand &O = ri.getOperand();
1521    MachineInstr *MI = &*ri;
1522    ++ri;
1523    if (O.isDef()) {
1524      assert(MI->isImplicitDef() &&
1525             "Register def was not rewritten?");
1526      RemoveMachineInstrFromMaps(MI);
1527      vrm.RemoveMachineInstrFromMaps(MI);
1528      MI->eraseFromParent();
1529    } else {
1530      // This must be a use of an implicit_def, so it's not part of the live
1531      // interval. Create a new empty live interval for it.
1532      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1533      unsigned NewVReg = mri_->createVirtualRegister(rc);
1534      vrm.grow();
1535      vrm.setIsImplicitlyDefined(NewVReg);
1536      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1537      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1538        MachineOperand &MO = MI->getOperand(i);
1539        if (MO.isReg() && MO.getReg() == li.reg) {
1540          MO.setReg(NewVReg);
1541          MO.setIsUndef();
1542        }
1543      }
1544    }
1545  }
1546}
1547
1548float
1549LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1550  // Limit the loop depth ridiculousness.
1551  if (loopDepth > 200)
1552    loopDepth = 200;
1553
1554  // The loop depth is used to roughly estimate the number of times the
1555  // instruction is executed. Something like 10^d is simple, but will quickly
1556  // overflow a float. This expression behaves like 10^d for small d, but is
1557  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1558  // headroom before overflow.
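  // A few sample points of the curve (values rounded):
  //   depth 0: (1 + 100/10)^0  =  1.0
  //   depth 1: (1 + 100/11)^1  ~= 10.1
  //   depth 2: (1 + 100/12)^2  ~= 87.1
  //   depth 3: (1 + 100/13)^3  ~= 656.8
  // Each extra level of nesting multiplies the weight by roughly ten for
  // shallow loops, tapering off as the depth grows.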
1559  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1560
1561  return (isDef + isUse) * lc;
1562}
1563
1564void
1565LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1566  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1567    normalizeSpillWeight(*NewLIs[i]);
1568}
1569
1570std::vector<LiveInterval*> LiveIntervals::
1571addIntervalsForSpillsFast(const LiveInterval &li,
1572                          const MachineLoopInfo *loopInfo,
1573                          VirtRegMap &vrm) {
1574  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1575
1576  std::vector<LiveInterval*> added;
1577
1578  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1579
1580  DEBUG({
1581      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1582      li.dump();
1583      dbgs() << '\n';
1584    });
1585
1586  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1587
1588  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1589  while (RI != mri_->reg_end()) {
1590    MachineInstr* MI = &*RI;
1591
1592    SmallVector<unsigned, 2> Indices;
1593    bool HasUse = false;
1594    bool HasDef = false;
1595
1596    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1597      MachineOperand& mop = MI->getOperand(i);
1598      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1599
1600      HasUse |= MI->getOperand(i).isUse();
1601      HasDef |= MI->getOperand(i).isDef();
1602
1603      Indices.push_back(i);
1604    }
1605
1606    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1607                              Indices, true, slot, li.reg)) {
1608      unsigned NewVReg = mri_->createVirtualRegister(rc);
1609      vrm.grow();
1610      vrm.assignVirt2StackSlot(NewVReg, slot);
1611
1612      // Create a new register for this spill.
1613      LiveInterval &nI = getOrCreateInterval(NewVReg);
1614      nI.markNotSpillable();
1615
1616      // Rewrite register operands to use the new vreg.
1617      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1618           E = Indices.end(); I != E; ++I) {
1619        MI->getOperand(*I).setReg(NewVReg);
1620
1621        if (MI->getOperand(*I).isUse())
1622          MI->getOperand(*I).setIsKill(true);
1623      }
1624
1625      // Fill in the new live interval.
1626      SlotIndex index = getInstructionIndex(MI);
1627      if (HasUse) {
1628        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1629                     nI.getNextValue(SlotIndex(), 0, false,
1630                                     getVNInfoAllocator()));
1631        DEBUG(dbgs() << " +" << LR);
1632        nI.addRange(LR);
1633        vrm.addRestorePoint(NewVReg, MI);
1634      }
1635      if (HasDef) {
1636        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1637                     nI.getNextValue(SlotIndex(), 0, false,
1638                                     getVNInfoAllocator()));
1639        DEBUG(dbgs() << " +" << LR);
1640        nI.addRange(LR);
1641        vrm.addSpillPoint(NewVReg, true, MI);
1642      }
1643
1644      added.push_back(&nI);
1645
1646      DEBUG({
1647          dbgs() << "\t\t\t\tadded new interval: ";
1648          nI.dump();
1649          dbgs() << '\n';
1650        });
1651    }
1652
1653
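    // Whether the operand was folded into MI (erasing it) or rewritten to
    // use NewVReg, MI is gone from li.reg's use list, so restart the scan;
    // the list shrinks each iteration, guaranteeing termination.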
1654    RI = mri_->reg_begin(li.reg);
1655  }
1656
1657  return added;
1658}
1659
1660std::vector<LiveInterval*> LiveIntervals::
1661addIntervalsForSpills(const LiveInterval &li,
1662                      SmallVectorImpl<LiveInterval*> &SpillIs,
1663                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1664
1665  if (EnableFastSpilling)
1666    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1667
1668  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1669
1670  DEBUG({
1671      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1672      li.print(dbgs(), tri_);
1673      dbgs() << '\n';
1674    });
1675
1676  // Each bit specifies whether a spill is required in the corresponding MBB.
1677  BitVector SpillMBBs(mf_->getNumBlockIDs());
1678  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1679  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1680  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1681  DenseMap<unsigned,unsigned> MBBVRegsMap;
1682  std::vector<LiveInterval*> NewLIs;
1683  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1684
1685  unsigned NumValNums = li.getNumValNums();
1686  SmallVector<MachineInstr*, 4> ReMatDefs;
1687  ReMatDefs.resize(NumValNums, NULL);
1688  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1689  ReMatOrigDefs.resize(NumValNums, NULL);
1690  SmallVector<int, 4> ReMatIds;
1691  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1692  BitVector ReMatDelete(NumValNums);
1693  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1694
1695  // Spilling a split live interval. It cannot be split any further, and it
1696  // is guaranteed to be a single val# / range interval.
1697  if (vrm.getPreSplitReg(li.reg)) {
1698    vrm.setIsSplitFromReg(li.reg, 0);
1699    // Unset the split kill marker on the last use.
1700    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1701    if (KillIdx != SlotIndex()) {
1702      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1703      assert(KillMI && "Last use disappeared?");
1704      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1705      assert(KillOp != -1 && "Last use disappeared?");
1706      KillMI->getOperand(KillOp).setIsKill(false);
1707    }
1708    vrm.removeKillPoint(li.reg);
1709    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1710    Slot = vrm.getStackSlot(li.reg);
1711    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1712    MachineInstr *ReMatDefMI = DefIsReMat ?
1713      vrm.getReMaterializedMI(li.reg) : NULL;
1714    int LdSlot = 0;
1715    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1716    bool isLoad = isLoadSS ||
1717      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1718    bool IsFirstRange = true;
1719    for (LiveInterval::Ranges::const_iterator
1720           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1721      // If this is a split live interval with multiple ranges, it means there
1722      // are two-address instructions that re-defined the value. Only the
1723      // first def can be rematerialized!
1724      if (IsFirstRange) {
1725        // Note ReMatOrigDefMI has already been deleted.
1726        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1727                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1728                             false, vrm, rc, ReMatIds, loopInfo,
1729                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1730                             MBBVRegsMap, NewLIs);
1731      } else {
1732        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1733                             Slot, 0, false, false, false,
1734                             false, vrm, rc, ReMatIds, loopInfo,
1735                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1736                             MBBVRegsMap, NewLIs);
1737      }
1738      IsFirstRange = false;
1739    }
1740
1741    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1742    normalizeSpillWeights(NewLIs);
1743    return NewLIs;
1744  }
1745
1746  bool TrySplit = !intervalIsInOneMBB(li);
1747  if (TrySplit)
1748    ++numSplits;
1749  bool NeedStackSlot = false;
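  // A stack slot is needed unless every val# can be rematerialized at all
  // of its uses; the scan below decides this per value number.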
1750  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1751       i != e; ++i) {
1752    const VNInfo *VNI = *i;
1753    unsigned VN = VNI->id;
1754    if (VNI->isUnused())
1755      continue; // Dead val#.
1756    // Is the def for the val# rematerializable?
1757    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1758      ? getInstructionFromIndex(VNI->def) : 0;
1759    bool dummy;
1760    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1761      // Remember how to remat the def of this val#.
1762      ReMatOrigDefs[VN] = ReMatDefMI;
1763      // The original def may be modified, so we have to make a copy here.
1764      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1765      CloneMIs.push_back(Clone);
1766      ReMatDefs[VN] = Clone;
1767
1768      bool CanDelete = true;
1769      if (VNI->hasPHIKill()) {
1770        // A kill is a phi node, so not all of its uses can be
1771        // rematerialized. The def must not be deleted.
1772        CanDelete = false;
1773        // Need a stack slot if there is any live range where uses cannot be
1774        // rematerialized.
1775        NeedStackSlot = true;
1776      }
1777      if (CanDelete)
1778        ReMatDelete.set(VN);
1779    } else {
1780      // Need a stack slot if there is any live range where uses cannot be
1781      // rematerialized.
1782      NeedStackSlot = true;
1783    }
1784  }
1785
1786  // One stack slot per live interval.
1787  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1788    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1789      Slot = vrm.assignVirt2StackSlot(li.reg);
1790
1791    // This case only occurs when the prealloc splitter has already assigned
1792    // a stack slot to this vreg.
1793    else
1794      Slot = vrm.getStackSlot(li.reg);
1795  }
1796
1797  // Create new intervals and rewrite defs and uses.
1798  for (LiveInterval::Ranges::const_iterator
1799         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1800    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1801    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1802    bool DefIsReMat = ReMatDefMI != NULL;
1803    bool CanDelete = ReMatDelete[I->valno->id];
1804    int LdSlot = 0;
1805    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1806    bool isLoad = isLoadSS ||
1807      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1808    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1809                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1810                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1811                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1812                               MBBVRegsMap, NewLIs);
1813  }
1814
1815  // Insert spills / restores only if we are splitting.
1816  if (!TrySplit) {
1817    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1818    normalizeSpillWeights(NewLIs);
1819    return NewLIs;
1820  }
1821
1822  SmallPtrSet<LiveInterval*, 4> AddedKill;
1823  SmallVector<unsigned, 2> Ops;
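  // Two passes follow: first walk SpillMBBs and try to fold each recorded
  // store into the last def of its block; then walk RestoreMBBs and try to
  // fold each reload (or rematerialized def) into the first use.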
1824  if (NeedStackSlot) {
1825    int Id = SpillMBBs.find_first();
1826    while (Id != -1) {
1827      std::vector<SRInfo> &spills = SpillIdxes[Id];
1828      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1829        SlotIndex index = spills[i].index;
1830        unsigned VReg = spills[i].vreg;
1831        LiveInterval &nI = getOrCreateInterval(VReg);
1832        bool isReMat = vrm.isReMaterialized(VReg);
1833        MachineInstr *MI = getInstructionFromIndex(index);
1834        bool CanFold = false;
1835        bool FoundUse = false;
1836        Ops.clear();
1837        if (spills[i].canFold) {
1838          CanFold = true;
1839          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1840            MachineOperand &MO = MI->getOperand(j);
1841            if (!MO.isReg() || MO.getReg() != VReg)
1842              continue;
1843
1844            Ops.push_back(j);
1845            if (MO.isDef())
1846              continue;
1847            if (isReMat ||
1848                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1849                                                RestoreMBBs, RestoreIdxes))) {
1850              // MI has two-address uses of the same register. If the use
1851              // isn't the first and only use in the BB, then we can't fold
1852              // it. FIXME: Move this to rewriteInstructionsForSpills.
1853              CanFold = false;
1854              break;
1855            }
1856            FoundUse = true;
1857          }
1858        }
1859        // Fold the store into the def if possible.
1860        bool Folded = false;
1861        if (CanFold && !Ops.empty()) {
1862          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1863            Folded = true;
1864            if (FoundUse) {
1865              // The uses were folded as well; do not issue a load.
1866              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1867              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1868            }
1869            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1870          }
1871        }
1872
1873        // Otherwise tell the spiller to issue a spill.
1874        if (!Folded) {
1875          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1876          bool isKill = LR->end == index.getStoreIndex();
1877          if (!MI->registerDefIsDead(nI.reg))
1878            // No need to spill a dead def.
1879            vrm.addSpillPoint(VReg, isKill, MI);
1880          if (isKill)
1881            AddedKill.insert(&nI);
1882        }
1883      }
1884      Id = SpillMBBs.find_next(Id);
1885    }
1886  }
1887
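  // Insert restores at the recorded first-use points, folding the load or
  // rematerialized def into the user whenever possible.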
1888  int Id = RestoreMBBs.find_first();
1889  while (Id != -1) {
1890    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1891    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1892      SlotIndex index = restores[i].index;
1893      if (index == SlotIndex())
1894        continue;
1895      unsigned VReg = restores[i].vreg;
1896      LiveInterval &nI = getOrCreateInterval(VReg);
1897      bool isReMat = vrm.isReMaterialized(VReg);
1898      MachineInstr *MI = getInstructionFromIndex(index);
1899      bool CanFold = false;
1900      Ops.clear();
1901      if (restores[i].canFold) {
1902        CanFold = true;
1903        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1904          MachineOperand &MO = MI->getOperand(j);
1905          if (!MO.isReg() || MO.getReg() != VReg)
1906            continue;
1907
1908          if (MO.isDef()) {
1909            // If this restore were to be folded, it would have been folded
1910            // already.
1911            CanFold = false;
1912            break;
1913          }
1914          Ops.push_back(j);
1915        }
1916      }
1917
1918      // Fold the load into the use if possible.
1919      bool Folded = false;
1920      if (CanFold && !Ops.empty()) {
1921        if (!isReMat)
1922          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1923        else {
1924          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1925          int LdSlot = 0;
1926          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1927          // If the rematerializable def is a load, also try to fold it.
1928          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1929            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1930                                          Ops, isLoadSS, LdSlot, VReg);
1931          if (!Folded) {
1932            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1933            if (ImpUse) {
1934              // Re-materializing an instruction with a virtual register
1935              // use. Add the register as an implicit use on the use MI and
1936              // mark the register interval as unspillable.
1937              LiveInterval &ImpLi = getInterval(ImpUse);
1938              ImpLi.markNotSpillable();
1939              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1940            }
1941          }
1942        }
1943      }
1944      // If folding was not possible or it failed, tell the spiller to issue a
1945      // load / rematerialization for us.
1946      if (Folded)
1947        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1948      else
1949        vrm.addRestorePoint(VReg, MI);
1950    }
1951    Id = RestoreMBBs.find_next(Id);
1952  }
1953
1954  // Finalize intervals: add kills, compute final spill weights, and filter
1955  // out dead intervals.
1956  std::vector<LiveInterval*> RetNewLIs;
1957  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1958    LiveInterval *LI = NewLIs[i];
1959    if (!LI->empty()) {
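      // Scale the accumulated def/use weight by the interval's approximate
      // size in slots (SlotIndex::NUM slots per instruction) so the weight
      // measures density rather than raw count.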
1960      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1961      if (!AddedKill.count(LI)) {
1962        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1963        SlotIndex LastUseIdx = LR->end.getBaseIndex();
1964        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1965        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1966        assert(UseIdx != -1);
1967        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1968          LastUse->getOperand(UseIdx).setIsKill();
1969          vrm.addKillPoint(LI->reg, LastUseIdx);
1970        }
1971      }
1972      RetNewLIs.push_back(LI);
1973    }
1974  }
1975
1976  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1977  normalizeSpillWeights(RetNewLIs);
1978  return RetNewLIs;
1979}
1980
1981/// hasAllocatableSuperReg - Return true if the specified physical register has
1982/// any super register that's allocatable.
1983bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1984  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1985    if (allocatableRegs_[*AS] && hasInterval(*AS))
1986      return true;
1987  return false;
1988}
1989
1990/// getRepresentativeReg - Find the largest super register of the specified
1991/// physical register.
1992unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1993  // Find the largest super-register that is allocatable.
1994  unsigned BestReg = Reg;
1995  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1996    unsigned SuperReg = *AS;
1997    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1998      BestReg = SuperReg;
1999      break;
2000    }
2001  }
2002  return BestReg;
2003}
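// For example (assuming a 32-bit x86 target where EAX is allocatable and has
// a live interval): getRepresentativeReg(AL) would skip AX, whose
// super-register EAX is allocatable, and return EAX, which has no allocatable
// super-register of its own.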
2004
2005/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2006/// specified interval that conflicts with the specified physical register.
2007unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2008                                                   unsigned PhysReg) const {
2009  unsigned NumConflicts = 0;
2010  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2011  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2012         E = mri_->reg_end(); I != E; ++I) {
2013    MachineOperand &O = I.getOperand();
2014    MachineInstr *MI = O.getParent();
2015    SlotIndex Index = getInstructionIndex(MI);
2016    if (pli.liveAt(Index))
2017      ++NumConflicts;
2018  }
2019  return NumConflicts;
2020}
2021
2022/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2023/// around all defs and uses of the specified interval. Return true if it
2024/// was able to cut away part of the physical register's live interval.
2025bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2026                                            unsigned PhysReg, VirtRegMap &vrm) {
2027  unsigned SpillReg = getRepresentativeReg(PhysReg);
2028
2029  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2030    // If there are registers which alias PhysReg but which are not
2031    // sub-registers of the chosen representative super register, assert,
2032    // since we can't handle them yet.
2033    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2034           tri_->isSuperRegister(*AS, SpillReg));
2035
2036  bool Cut = false;
2037  SmallVector<unsigned, 4> PRegs;
2038  if (hasInterval(SpillReg))
2039    PRegs.push_back(SpillReg);
2040  else {
2041    SmallSet<unsigned, 4> Added;
2042    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2043      if (Added.insert(*AS) && hasInterval(*AS)) {
2044        PRegs.push_back(*AS);
2045        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2046          Added.insert(*ASS);
2047      }
2048  }
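  // At this point PRegs holds the physical registers whose intervals
  // actually cover SpillReg: the representative itself if it has an
  // interval, otherwise its outermost sub-registers that do.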
2049
2050  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2051  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2052         E = mri_->reg_end(); I != E; ++I) {
2053    MachineOperand &O = I.getOperand();
2054    MachineInstr *MI = O.getParent();
2055    if (SeenMIs.count(MI))
2056      continue;
2057    SeenMIs.insert(MI);
2058    SlotIndex Index = getInstructionIndex(MI);
2059    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2060      unsigned PReg = PRegs[i];
2061      LiveInterval &pli = getInterval(PReg);
2062      if (!pli.liveAt(Index))
2063        continue;
2064      vrm.addEmergencySpill(PReg, MI);
2065      SlotIndex StartIdx = Index.getLoadIndex();
2066      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2067      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2068        pli.removeRange(StartIdx, EndIdx);
2069        Cut = true;
2070      } else {
2071        std::string msg;
2072        raw_string_ostream Msg(msg);
2073        Msg << "Ran out of registers during register allocation!";
2074        if (MI->isInlineAsm()) {
2075          Msg << "\nPlease check your inline asm statement for invalid "
2076              << "constraints:\n";
2077          MI->print(Msg, tm_);
2078        }
2079        llvm_report_error(Msg.str());
2080      }
2081      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2082        if (!hasInterval(*AS))
2083          continue;
2084        LiveInterval &spli = getInterval(*AS);
2085        if (spli.liveAt(Index))
2086          spli.removeRange(Index.getLoadIndex(),
2087                           Index.getNextIndex().getBaseIndex());
2088      }
2089    }
2090  }
2091  return Cut;
2092}
2093
2094LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2095                                                  MachineInstr* startInst) {
2096  LiveInterval& Interval = getOrCreateInterval(reg);
2097  VNInfo* VN = Interval.getNextValue(
2098    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2099    startInst, true, getVNInfoAllocator());
2100  VN->setHasPHIKill(true);
2101  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2102  LiveRange LR(
2103     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2104     getMBBEndIdx(startInst->getParent()), VN);
2105  Interval.addRange(LR);
2106
2107  return LR;
2108}
2109
2110