LiveIntervalAnalysis.cpp revision 15417383b96f9035a00bab5c13746f59e91e35e0
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I) {
    I->second->clear();
    delete I->second;
  }

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
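  // (Both bounds are rounded to instruction base indices so the scan below
  // can walk the range one instruction at a time.)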

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
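  // For example (illustrative code, not from any particular target), after
  // two-address elimination we may see:
  //   %reg1024 = MOV %reg1025
  //   %reg1024 = ADD %reg1024, 4    ; second def of %reg1024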
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+1) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
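      // Illustrative example (SlotIndex values are hypothetical):
      //   4  %reg1024 = MOV %reg1025      ; this def becomes value #1, [4, 8)
      //   8  %reg1024 = ADD %reg1024, 4   ; value #0 is now defined here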
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should have a dead physreg here without a kill
  // or a redefinition is when it is live-in to the function and never used.
  // Another possible case is that the implicit use of the physical register
  // has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
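/// For example (hypothetical indices), given
///   4   %reg1024 = ...
///   8   ...      = %reg1024
/// %reg1024 is live in the interval [defSlot(4), useSlot(8)+1).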
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
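  // A typical (illustrative) x86 case is a constant-pool load that uses the
  // PIC base:
  //   %reg1030 = MOVPC32r ...          ; PIC base
  //   %reg1040 = MOV32rm %reg1030, ... ; remat candidate, implicitly uses
  //                                    ; %reg1030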
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
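
// For example (illustrative), in a two-address instruction
//   %reg1024 = ADD32rr %reg1024, %reg1025
// where the use of %reg1024 is tied to its def, the tied use is reported
// through MRInfo as isModRef instead of being pushed onto FoldOps.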

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot or a rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB,
                                   SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
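
// Note: entries that share a SlotIndex come from operands of a single
// MachineInstr, so the non-stable std::sort in rewriteInstructionsForSpills
// still keeps each instruction's entries adjacent.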

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
1456        // Use(s) following the last def, it's not safe to fold the spill.
1457        SII->second.back().canFold = false;
1458      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1459        RestoreIdxes.find(MBBId);
1460      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1461        // If we are splitting live intervals, only fold if it's the first
1462        // use and there isn't another use later in the MBB.
1463        RII->second.back().canFold = false;
1464      else if (IsNew) {
1465        // Only need a reload if there isn't an earlier def / use.
1466        if (RII == RestoreIdxes.end()) {
1467          std::vector<SRInfo> Infos;
1468          Infos.push_back(SRInfo(index, NewVReg, true));
1469          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1470        } else {
1471          RII->second.push_back(SRInfo(index, NewVReg, true));
1472        }
1473        RestoreMBBs.set(MBBId);
1474      }
1475    }
1476
1477    // Update spill weight.
1478    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1479    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1480  }
1481
1482  if (NewVReg && TrySplit && AllCanFold) {
1483    // If all of its def / use can be folded, give it a low spill weight.
1484    LiveInterval &nI = getOrCreateInterval(NewVReg);
1485    nI.weight /= 10.0F;
1486  }
1487}
1488
1489bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1490                        unsigned vr, BitVector &RestoreMBBs,
1491                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1492  if (!RestoreMBBs[Id])
1493    return false;
1494  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1495  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1496    if (Restores[i].index == index &&
1497        Restores[i].vreg == vr &&
1498        Restores[i].canFold)
1499      return true;
1500  return false;
1501}
1502
1503void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1504                        unsigned vr, BitVector &RestoreMBBs,
1505                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1506  if (!RestoreMBBs[Id])
1507    return;
1508  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
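  // Entries are not erased; a matching record merely has its index reset to
  // the invalid sentinel SlotIndex(), which the restore-insertion pass skips.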
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
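  // Any def of li.reg reaching here must be an IMPLICIT_DEF (every real def
  // was rewritten during spilling); delete it. Each remaining use gets a
  // fresh, empty, implicitly-defined vreg marked <undef> so the allocator
  // can assign it a register without inserting a reload.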
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
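  // For example: lc is 1 at depth 0, ~10.1 at depth 1, ~87 at depth 2, and
  // ~657 at depth 3 -- close to 10^d at first, then increasingly damped.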
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
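  // The fast path makes no attempt at splitting: every instruction touching
  // li.reg either gets the stack slot folded directly into it, or is
  // rewritten to use a brand-new vreg that is live only across that
  // instruction and is itself marked unspillable.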
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
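      // Note: each instruction index carries load/use/def/store sub-slots in
      // that order, so a restore covers [load,use) and a spill [def,store).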
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

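    // Either folding erased MI or every remaining operand was rewritten to a
    // new vreg, so MI no longer appears on li.reg's use list; restarting at
    // reg_begin() therefore advances to the next instruction.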
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
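  // SpillIdxes / RestoreIdxes record, per basic block number, where stores
  // and reloads may be needed (and whether each can still be folded), while
  // MBBVRegsMap remembers the new vreg most recently used for this interval
  // in each block so later references there can share it.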
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. It is
  // also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
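  // Walk the value numbers: a val# whose def can be rematerialized
  // everywhere needs no memory at all; any val# that cannot forces the
  // interval to get a stack slot.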
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting; otherwise we're done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
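  // Two passes over the blocks flagged above: first try to fold stores into
  // the defining instructions at the recorded spill points, then fold loads
  // at the restore points; whatever cannot be folded is handed to the
  // spiller as explicit spill / restore points.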
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Uses were folded as well; do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add
              // the register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
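      // Normalize the accumulated def/use weight by the interval's
      // approximate length in instructions so weights stay comparable
      // across intervals of different sizes.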
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
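/// A physical register is represented by its topmost super-register that has
/// a live interval but no allocatable super-register of its own (e.g., on
/// 32-bit x86, AX would typically be represented by EAX).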
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut away part of the physical register's interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
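  // An instruction may reference li.reg in several operands; SeenMIs ensures
  // each instruction is processed (and each emergency spill added) only once.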
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

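/// addLiveRangeToEndOfBlock - Create a new value number defined by startInst,
/// mark it as having a PHI kill at the block's terminator gap, and extend its
/// live range to the end of startInst's basic block. Returns the added range.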
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
