LiveIntervalAnalysis.cpp revision 87450c87ff52d57481df9e6263f9a1b68cc8005a
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
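  // SlotIndexes is required transitively: it must stay alive for as long as
  // clients of LiveIntervals are still using this analysis' results.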
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
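  // Note: a SlotIndex covers four slots (load, use, def, store). Rounding
  // 'idx' down to a base index and 'end' up to the base index just past the
  // last covered instruction lets the loops below step through whole
  // instructions.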

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except it can
/// check uses as well.
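/// Unlike conflictsWithPhysReg, this does not consult the VirtRegMap; it
/// only scans for explicit operands that are sub-registers of Reg.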
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
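    // Each instruction index covers four slots, ordered
    // load < use < def < store; using the use slot for an earlyclobber def
    // makes it overlap the live ranges of the instruction's inputs.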
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
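    // For example, a vreg defined at index 4 and killed at index 12 inside
    // one block yields the single range [4's def slot, 12's def slot) with
    // one value number.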
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
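      // For example, after two-address conversion:
      //   4  %reg1024 = mov %reg1025            ; copy, becomes value #1
      //   8  %reg1024 = add %reg1024, %reg1026  ; redef, keeps value #0
      // The single value must be split at the redefinition.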
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // flags are copied below
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // copy the old value's flags over

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {

        VNInfo *VNI = interval.getValNumInfo(0);
        // Phi elimination may have reused the register for multiple identical
        // phi nodes. There will be a kill per phi. Remove the old ranges that
        // we now know have an incorrect number.
        for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
          MachineInstr *Killer = vi.Kills[ki];
          SlotIndex Start = getMBBStartIdx(Killer->getParent());
          SlotIndex End = getInstructionIndex(Killer).getDefIndex();
          DEBUG({
              errs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
              interval.print(errs(), tri_);
            });
          interval.removeRange(Start, End);

          // Replace the range with one that has a NEW value number.  Note
          // that this value number isn't actually defined by an instruction,
          // weird huh? :)
          LiveRange LR(Start, End,
                       interval.getNextValue(SlotIndex(Start, true),
                                             0, false, VNInfoAllocator));
          LR.valno->setIsPHIDef(true);
          interval.addRange(LR);
          LR.valno->addKill(End);
        }

        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it is dead, is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

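  // The live-in value has no defining instruction inside this function, so
  // anchor it at the block start and treat it like a PHI def.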
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
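  // Physical register intervals can never be spilled, so they get an
  // effectively infinite weight; virtual register weights start at zero
  // and are accumulated later via getSpillWeight.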
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one virtual register
/// operand (for now, we only allow one), then its uses implicitly use that
/// register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
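  // MRInfo accumulates VirtRegMap bits describing how the folded operands
  // touch the register: defs add isMod, plain uses add isRef, and a use
  // tied to a def is recorded as isModRef.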
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold a spill or restore from / to a
/// stack slot, or a rematerialized load, into the specified operands of MI.
/// If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
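/// This is the query-only counterpart of tryFoldMemoryOperand: it runs the
/// same operand filtering, but only asks the target whether folding would
/// succeed instead of rewriting anything.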
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
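/// Entries are sorted by SlotIndex only; all operands of a given instruction
/// share one index, so after sorting they are adjacent and the rewrite loop
/// can batch entries that refer to the same MachineInstr.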
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1274
1275void LiveIntervals::
1276rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1277                    LiveInterval::Ranges::const_iterator &I,
1278                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1279                    unsigned Slot, int LdSlot,
1280                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1281                    VirtRegMap &vrm,
1282                    const TargetRegisterClass* rc,
1283                    SmallVector<int, 4> &ReMatIds,
1284                    const MachineLoopInfo *loopInfo,
1285                    BitVector &SpillMBBs,
1286                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1287                    BitVector &RestoreMBBs,
1288                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1289                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1290                    std::vector<LiveInterval*> &NewLIs) {
1291  bool AllCanFold = true;
1292  unsigned NewVReg = 0;
1293  SlotIndex start = I->start.getBaseIndex();
1294  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1295
1296  // First collect all the def / use in this live range that will be rewritten.
1297  // Make sure they are sorted according to instruction index.
1298  std::vector<RewriteInfo> RewriteMIs;
1299  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1300         re = mri_->reg_end(); ri != re; ) {
1301    MachineInstr *MI = &*ri;
1302    MachineOperand &O = ri.getOperand();
1303    ++ri;
1304    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1305    SlotIndex index = getInstructionIndex(MI);
1306    if (index < start || index >= end)
1307      continue;
1308
1309    if (O.isUndef())
1310      // Must be defined by an implicit def. It should not be spilled. Note,
1311      // this is for correctness reason. e.g.
1312      // 8   %reg1024<def> = IMPLICIT_DEF
1313      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1314      // The live range [12, 14) are not part of the r1024 live interval since
1315      // it's defined by an implicit def. It will not conflicts with live
1316      // interval of r1025. Now suppose both registers are spilled, you can
1317      // easily see a situation where both registers are reloaded before
1318      // the INSERT_SUBREG and both target registers that would overlap.
1319      continue;
1320    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1321  }
1322  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1323
1324  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1325  // Now rewrite the defs and uses.
1326  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1327    RewriteInfo &rwi = RewriteMIs[i];
1328    ++i;
1329    SlotIndex index = rwi.Index;
1330    bool MIHasUse = rwi.HasUse;
1331    bool MIHasDef = rwi.HasDef;
1332    MachineInstr *MI = rwi.MI;
1333    // If MI def and/or use the same register multiple times, then there
1334    // are multiple entries.
1335    unsigned NumUses = MIHasUse;
1336    while (i != e && RewriteMIs[i].MI == MI) {
1337      assert(RewriteMIs[i].Index == index);
1338      bool isUse = RewriteMIs[i].HasUse;
1339      if (isUse) ++NumUses;
1340      MIHasUse |= isUse;
1341      MIHasDef |= RewriteMIs[i].HasDef;
1342      ++i;
1343    }
1344    MachineBasicBlock *MBB = MI->getParent();
1345
1346    if (ImpUse && MI != ReMatDefMI) {
1347      // Re-matting an instruction with virtual register use. Update the
1348      // register interval's spill weight to HUGE_VALF to prevent it from
1349      // being spilled.
1350      LiveInterval &ImpLi = getInterval(ImpUse);
1351      ImpLi.weight = HUGE_VALF;
1352    }
1353
1354    unsigned MBBId = MBB->getNumber();
1355    unsigned ThisVReg = 0;
1356    if (TrySplit) {
1357      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1358      if (NVI != MBBVRegsMap.end()) {
1359        ThisVReg = NVI->second;
1360        // One common case:
1361        // x = use
1362        // ...
1363        // ...
1364        // def = ...
1365        //     = use
1366        // It's better to start a new interval to avoid artifically
1367        // extend the new interval.
1368        if (MIHasDef && !MIHasUse) {
1369          MBBVRegsMap.erase(MBB->getNumber());
1370          ThisVReg = 0;
1371        }
1372      }
1373    }
1374
1375    bool IsNew = ThisVReg == 0;
1376    if (IsNew) {
1377      // This ends the previous live interval. If all of its def / use
1378      // can be folded, give it a low spill weight.
1379      if (NewVReg && TrySplit && AllCanFold) {
1380        LiveInterval &nI = getOrCreateInterval(NewVReg);
1381        nI.weight /= 10.0F;
1382      }
1383      AllCanFold = true;
1384    }
1385    NewVReg = ThisVReg;
1386
1387    bool HasDef = false;
1388    bool HasUse = false;
1389    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1390                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1391                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1392                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1393                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1394    if (!HasDef && !HasUse)
1395      continue;
1396
1397    AllCanFold &= CanFold;
1398
1399    // Update weight of spill interval.
1400    LiveInterval &nI = getOrCreateInterval(NewVReg);
1401    if (!TrySplit) {
1402      // The spill weight is now infinity as it cannot be spilled again.
1403      nI.weight = HUGE_VALF;
1404      continue;
1405    }
1406
1407    // Keep track of the last def and first use in each MBB.
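    // (Illustrative two-address case for the bookkeeping below:
    //   8   %reg1024<def> = ...
    //   12  %reg1024<def> = ADD32ri %reg1024<kill>, 1
    // Only the def at 12 needs a store after it, so the SRInfo entry
    // recorded for the earlier def is updated in place or popped below.)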
1408    if (HasDef) {
1409      if (MI != ReMatOrigDefMI || !CanDelete) {
1410        bool HasKill = false;
1411        if (!HasUse)
1412          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1413        else {
1414          // If this is a two-address instruction, then this index starts a new VNInfo.
1415          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1416          if (VNI)
1417            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1418        }
1419        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1420          SpillIdxes.find(MBBId);
1421        if (!HasKill) {
1422          if (SII == SpillIdxes.end()) {
1423            std::vector<SRInfo> S;
1424            S.push_back(SRInfo(index, NewVReg, true));
1425            SpillIdxes.insert(std::make_pair(MBBId, S));
1426          } else if (SII->second.back().vreg != NewVReg) {
1427            SII->second.push_back(SRInfo(index, NewVReg, true));
1428          } else if (index > SII->second.back().index) {
1429            // If there is an earlier def and this is a two-address
1430            // instruction, then it's not possible to fold the store (which
1431            // would also fold the load).
1432            SRInfo &Info = SII->second.back();
1433            Info.index = index;
1434            Info.canFold = !HasUse;
1435          }
1436          SpillMBBs.set(MBBId);
1437        } else if (SII != SpillIdxes.end() &&
1438                   SII->second.back().vreg == NewVReg &&
1439                   index > SII->second.back().index) {
1440          // There is an earlier def that's not killed (must be two-address).
1441          // The spill is no longer needed.
1442          SII->second.pop_back();
1443          if (SII->second.empty()) {
1444            SpillIdxes.erase(MBBId);
1445            SpillMBBs.reset(MBBId);
1446          }
1447        }
1448      }
1449    }
1450
1451    if (HasUse) {
1452      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1453        SpillIdxes.find(MBBId);
1454      if (SII != SpillIdxes.end() &&
1455          SII->second.back().vreg == NewVReg &&
1456          index > SII->second.back().index)
1457        // Use(s) follow the last def, so it's not safe to fold the spill.
1458        SII->second.back().canFold = false;
1459      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1460        RestoreIdxes.find(MBBId);
1461      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1462        // If we are splitting live intervals, only fold if it's the first
1463        // use and there isn't another use later in the MBB.
1464        RII->second.back().canFold = false;
1465      else if (IsNew) {
1466        // Only need a reload if there isn't an earlier def / use.
1467        if (RII == RestoreIdxes.end()) {
1468          std::vector<SRInfo> Infos;
1469          Infos.push_back(SRInfo(index, NewVReg, true));
1470          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1471        } else {
1472          RII->second.push_back(SRInfo(index, NewVReg, true));
1473        }
1474        RestoreMBBs.set(MBBId);
1475      }
1476    }
1477
1478    // Update spill weight.
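    // (Assuming getSpillWeight uses the usual (isDef + isUse) * 10^loopDepth
    // formula from the header, a def plus a use at loop depth 2 would add
    // 200.0F here.)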
1479    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1480    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1481  }
1482
1483  if (NewVReg && TrySplit && AllCanFold) {
1484    // If all of its defs / uses can be folded, give it a low spill weight.
1485    LiveInterval &nI = getOrCreateInterval(NewVReg);
1486    nI.weight /= 10.0F;
1487  }
1488}
1489
1490bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1491                        unsigned vr, BitVector &RestoreMBBs,
1492                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1493  if (!RestoreMBBs[Id])
1494    return false;
1495  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1496  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1497    if (Restores[i].index == index &&
1498        Restores[i].vreg == vr &&
1499        Restores[i].canFold)
1500      return true;
1501  return false;
1502}
1503
1504void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1505                        unsigned vr, BitVector &RestoreMBBs,
1506                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1507  if (!RestoreMBBs[Id])
1508    return;
1509  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1510  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1511    if (Restores[i].index == index && Restores[i].vreg == vr)
1512      Restores[i].index = SlotIndex();
1513}
1514
1515/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1516/// spilled and create empty intervals for their uses.
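/// For example (illustrative):
///   8   %reg1024<def> = IMPLICIT_DEF
///   12  ...           = use %reg1024
/// If %reg1024 is spilled, the IMPLICIT_DEF at 8 is erased and the use at
/// 12 is rewritten to a fresh virtual register that is marked <undef> and
/// given an empty live interval.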
1517void
1518LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1519                                    const TargetRegisterClass* rc,
1520                                    std::vector<LiveInterval*> &NewLIs) {
1521  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1522         re = mri_->reg_end(); ri != re; ) {
1523    MachineOperand &O = ri.getOperand();
1524    MachineInstr *MI = &*ri;
1525    ++ri;
1526    if (O.isDef()) {
1527      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1528             "Register def was not rewritten?");
1529      RemoveMachineInstrFromMaps(MI);
1530      vrm.RemoveMachineInstrFromMaps(MI);
1531      MI->eraseFromParent();
1532    } else {
1533      // This must be a use of an implicit_def, so it's not part of the live
1534      // interval. Create a new empty live interval for it.
1535      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1536      unsigned NewVReg = mri_->createVirtualRegister(rc);
1537      vrm.grow();
1538      vrm.setIsImplicitlyDefined(NewVReg);
1539      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1540      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1541        MachineOperand &MO = MI->getOperand(i);
1542        if (MO.isReg() && MO.getReg() == li.reg) {
1543          MO.setReg(NewVReg);
1544          MO.setIsUndef();
1545        }
1546      }
1547    }
1548  }
1549}
1550
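/// addIntervalsForSpillsFast - Spill an interval one instruction at a time:
/// each MI that cannot fold the stack slot directly is rewritten to use a
/// fresh vreg with infinite spill weight, with a restore added before its
/// uses and a spill added after its defs.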
1551std::vector<LiveInterval*> LiveIntervals::
1552addIntervalsForSpillsFast(const LiveInterval &li,
1553                          const MachineLoopInfo *loopInfo,
1554                          VirtRegMap &vrm) {
1555  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1556
1557  std::vector<LiveInterval*> added;
1558
1559  assert(li.weight != HUGE_VALF &&
1560         "attempt to spill already spilled interval!");
1561
1562  DEBUG({
1563      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1564      li.dump();
1565      errs() << '\n';
1566    });
1567
1568  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1569
1570  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1571  while (RI != mri_->reg_end()) {
1572    MachineInstr* MI = &*RI;
1573
1574    SmallVector<unsigned, 2> Indices;
1575    bool HasUse = false;
1576    bool HasDef = false;
1577
1578    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1579      MachineOperand& mop = MI->getOperand(i);
1580      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1581
1582      HasUse |= mop.isUse();
1583      HasDef |= mop.isDef();
1584
1585      Indices.push_back(i);
1586    }
1587
1588    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1589                              Indices, true, slot, li.reg)) {
1590      unsigned NewVReg = mri_->createVirtualRegister(rc);
1591      vrm.grow();
1592      vrm.assignVirt2StackSlot(NewVReg, slot);
1593
1594      // Create a new live interval for this spill.
1595      LiveInterval &nI = getOrCreateInterval(NewVReg);
1596
1597      // The spill weight is now infinity as it
1598      // cannot be spilled again.
1599      nI.weight = HUGE_VALF;
1600
1601      // Rewrite register operands to use the new vreg.
1602      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1603           E = Indices.end(); I != E; ++I) {
1604        MI->getOperand(*I).setReg(NewVReg);
1605
1606        if (MI->getOperand(*I).isUse())
1607          MI->getOperand(*I).setIsKill(true);
1608      }
1609
1610      // Fill in the new live interval.
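      // Each instruction index is subdivided into load / use / def / store
      // slots, so the restore below covers [load, use) and the spill covers
      // [def, store) around this same instruction.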
1611      SlotIndex index = getInstructionIndex(MI);
1612      if (HasUse) {
1613        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1614                     nI.getNextValue(SlotIndex(), 0, false,
1615                                     getVNInfoAllocator()));
1616        DEBUG(errs() << " +" << LR);
1617        nI.addRange(LR);
1618        vrm.addRestorePoint(NewVReg, MI);
1619      }
1620      if (HasDef) {
1621        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1622                     nI.getNextValue(SlotIndex(), 0, false,
1623                                     getVNInfoAllocator()));
1624        DEBUG(errs() << " +" << LR);
1625        nI.addRange(LR);
1626        vrm.addSpillPoint(NewVReg, true, MI);
1627      }
1628
1629      added.push_back(&nI);
1630
1631      DEBUG({
1632          errs() << "\t\t\t\tadded new interval: ";
1633          nI.dump();
1634          errs() << '\n';
1635        });
1636    }
1637
1638
1639    RI = mri_->reg_begin(li.reg);
1640  }
1641
1642  return added;
1643}
1644
1645std::vector<LiveInterval*> LiveIntervals::
1646addIntervalsForSpills(const LiveInterval &li,
1647                      SmallVectorImpl<LiveInterval*> &SpillIs,
1648                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1649
1650  if (EnableFastSpilling)
1651    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1652
1653  assert(li.weight != HUGE_VALF &&
1654         "attempt to spill already spilled interval!");
1655
1656  DEBUG({
1657      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1658      li.print(errs(), tri_);
1659      errs() << '\n';
1660    });
1661
1662  // Each bit specifies whether a spill is required in the MBB.
1663  BitVector SpillMBBs(mf_->getNumBlockIDs());
1664  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1665  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1666  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1667  DenseMap<unsigned,unsigned> MBBVRegsMap;
1668  std::vector<LiveInterval*> NewLIs;
1669  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1670
1671  unsigned NumValNums = li.getNumValNums();
1672  SmallVector<MachineInstr*, 4> ReMatDefs;
1673  ReMatDefs.resize(NumValNums, NULL);
1674  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1675  ReMatOrigDefs.resize(NumValNums, NULL);
1676  SmallVector<int, 4> ReMatIds;
1677  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1678  BitVector ReMatDelete(NumValNums);
1679  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1680
1681  // Spilling a split live interval: it cannot be split any further, and
1682  // it's guaranteed to be a single val# / single range interval.
1683  if (vrm.getPreSplitReg(li.reg)) {
1684    vrm.setIsSplitFromReg(li.reg, 0);
1685    // Unset the split kill marker on the last use.
1686    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1687    if (KillIdx != SlotIndex()) {
1688      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1689      assert(KillMI && "Last use disappeared?");
1690      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1691      assert(KillOp != -1 && "Last use disappeared?");
1692      KillMI->getOperand(KillOp).setIsKill(false);
1693    }
1694    vrm.removeKillPoint(li.reg);
1695    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1696    Slot = vrm.getStackSlot(li.reg);
1697    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1698    MachineInstr *ReMatDefMI = DefIsReMat ?
1699      vrm.getReMaterializedMI(li.reg) : NULL;
1700    int LdSlot = 0;
1701    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1702    bool isLoad = isLoadSS ||
1703      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1704    bool IsFirstRange = true;
1705    for (LiveInterval::Ranges::const_iterator
1706           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1707      // If this is a split live interval with multiple ranges, it means there
1708      // are two-address instructions that re-defined the value. Only the
1709      // first def can be rematerialized!
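      // e.g. (X86-flavored illustration):
      //   8   %reg1024<def> = MOV32ri 7                    ; rematerializable
      //   12  %reg1024<def> = ADD32ri %reg1024<kill>, 5    ; two-address redef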
1710      if (IsFirstRange) {
1711        // Note ReMatOrigDefMI has already been deleted.
1712        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1713                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1714                             false, vrm, rc, ReMatIds, loopInfo,
1715                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1716                             MBBVRegsMap, NewLIs);
1717      } else {
1718        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1719                             Slot, 0, false, false, false,
1720                             false, vrm, rc, ReMatIds, loopInfo,
1721                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1722                             MBBVRegsMap, NewLIs);
1723      }
1724      IsFirstRange = false;
1725    }
1726
1727    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1728    return NewLIs;
1729  }
1730
1731  bool TrySplit = !intervalIsInOneMBB(li);
1732  if (TrySplit)
1733    ++numSplits;
1734  bool NeedStackSlot = false;
1735  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1736       i != e; ++i) {
1737    const VNInfo *VNI = *i;
1738    unsigned VN = VNI->id;
1739    if (VNI->isUnused())
1740      continue; // Dead val#.
1741    // Is the def for the val# rematerializable?
1742    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1743      ? getInstructionFromIndex(VNI->def) : 0;
1744    bool dummy;
1745    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1746      // Remember how to remat the def of this val#.
1747      ReMatOrigDefs[VN] = ReMatDefMI;
1748      // Original def may be modified so we have to make a copy here.
1749      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1750      CloneMIs.push_back(Clone);
1751      ReMatDefs[VN] = Clone;
1752
1753      bool CanDelete = true;
1754        // A kill is a phi node: not all of its uses can be rematerialized,
1755        // so the def must not be deleted.
1756        // It must not be deleted.
1757        CanDelete = false;
1758        // Need a stack slot if there is any live range where uses cannot be
1759        // rematerialized.
1760        NeedStackSlot = true;
1761      }
1762      if (CanDelete)
1763        ReMatDelete.set(VN);
1764    } else {
1765      // Need a stack slot if there is any live range where uses cannot be
1766      // rematerialized.
1767      NeedStackSlot = true;
1768    }
1769  }
1770
1771  // One stack slot per live interval.
1772  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1773    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1774      Slot = vrm.assignVirt2StackSlot(li.reg);
1775
1776    // This case only occurs when the prealloc splitter has already assigned
1777    // a stack slot to this vreg.
1778    else
1779      Slot = vrm.getStackSlot(li.reg);
1780  }
1781
1782  // Create new intervals and rewrite defs and uses.
1783  for (LiveInterval::Ranges::const_iterator
1784         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1785    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1786    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1787    bool DefIsReMat = ReMatDefMI != NULL;
1788    bool CanDelete = ReMatDelete[I->valno->id];
1789    int LdSlot = 0;
1790    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1791    bool isLoad = isLoadSS ||
1792      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1793    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1794                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1795                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1796                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1797                               MBBVRegsMap, NewLIs);
1798  }
1799
1800  // Insert spills / restores only if we are splitting; otherwise we're done.
1801  if (!TrySplit) {
1802    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1803    return NewLIs;
1804  }
1805
1806  SmallPtrSet<LiveInterval*, 4> AddedKill;
1807  SmallVector<unsigned, 2> Ops;
1808  if (NeedStackSlot) {
1809    int Id = SpillMBBs.find_first();
1810    while (Id != -1) {
1811      std::vector<SRInfo> &spills = SpillIdxes[Id];
1812      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1813        SlotIndex index = spills[i].index;
1814        unsigned VReg = spills[i].vreg;
1815        LiveInterval &nI = getOrCreateInterval(VReg);
1816        bool isReMat = vrm.isReMaterialized(VReg);
1817        MachineInstr *MI = getInstructionFromIndex(index);
1818        bool CanFold = false;
1819        bool FoundUse = false;
1820        Ops.clear();
1821        if (spills[i].canFold) {
1822          CanFold = true;
1823          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1824            MachineOperand &MO = MI->getOperand(j);
1825            if (!MO.isReg() || MO.getReg() != VReg)
1826              continue;
1827
1828            Ops.push_back(j);
1829            if (MO.isDef())
1830              continue;
1831            if (isReMat ||
1832                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1833                                                RestoreMBBs, RestoreIdxes))) {
1834              // MI has two-address uses of the same register. If the use
1835              // isn't the first and only use in the BB, then we can't fold
1836              // it. FIXME: Move this to rewriteInstructionsForSpills.
1837              CanFold = false;
1838              break;
1839            }
1840            FoundUse = true;
1841          }
1842        }
1843        // Fold the store into the def if possible.
1844        bool Folded = false;
1845        if (CanFold && !Ops.empty()) {
1846          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1847            Folded = true;
1848            if (FoundUse) {
1849              // Also folded uses, do not issue a load.
1850              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1851              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1852            }
1853            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1854          }
1855        }
1856
1857        // Otherwise tell the spiller to issue a spill.
1858        if (!Folded) {
1859          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1860          bool isKill = LR->end == index.getStoreIndex();
1861          if (!MI->registerDefIsDead(nI.reg))
1862            // No need to spill a dead def.
1863            vrm.addSpillPoint(VReg, isKill, MI);
1864          if (isKill)
1865            AddedKill.insert(&nI);
1866        }
1867      }
1868      Id = SpillMBBs.find_next(Id);
1869    }
1870  }
1871
1872  int Id = RestoreMBBs.find_first();
1873  while (Id != -1) {
1874    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1875    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1876      SlotIndex index = restores[i].index;
1877      if (index == SlotIndex())
1878        continue;
1879      unsigned VReg = restores[i].vreg;
1880      LiveInterval &nI = getOrCreateInterval(VReg);
1881      bool isReMat = vrm.isReMaterialized(VReg);
1882      MachineInstr *MI = getInstructionFromIndex(index);
1883      bool CanFold = false;
1884      Ops.clear();
1885      if (restores[i].canFold) {
1886        CanFold = true;
1887        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1888          MachineOperand &MO = MI->getOperand(j);
1889          if (!MO.isReg() || MO.getReg() != VReg)
1890            continue;
1891
1892          if (MO.isDef()) {
1893            // If this restore were to be folded, it would have been folded
1894            // already.
1895            CanFold = false;
1896            break;
1897          }
1898          Ops.push_back(j);
1899        }
1900      }
1901
1902      // Fold the load into the use if possible.
1903      bool Folded = false;
1904      if (CanFold && !Ops.empty()) {
1905        if (!isReMat)
1906          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1907        else {
1908          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1909          int LdSlot = 0;
1910          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1911          // If the rematerializable def is a load, also try to fold it.
1912          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1913            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1914                                          Ops, isLoadSS, LdSlot, VReg);
1915          if (!Folded) {
1916            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1917            if (ImpUse) {
1918              // Re-matting an instruction with a virtual register use. Add the
1919              // register as an implicit use on the use MI and update the register
1920              // interval's spill weight to HUGE_VALF to prevent it from being
1921              // spilled.
1922              LiveInterval &ImpLi = getInterval(ImpUse);
1923              ImpLi.weight = HUGE_VALF;
1924              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1925            }
1926          }
1927        }
1928      }
1929      // If folding is not possible / failed, then tell the spiller to issue a
1930      // load / rematerialization for us.
1931      if (Folded)
1932        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1933      else
1934        vrm.addRestorePoint(VReg, MI);
1935    }
1936    Id = RestoreMBBs.find_next(Id);
1937  }
1938
1939  // Finalize intervals: add kills, finalize spill weights, and filter out
1940  // dead intervals.
1941  std::vector<LiveInterval*> RetNewLIs;
1942  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1943    LiveInterval *LI = NewLIs[i];
1944    if (!LI->empty()) {
1945      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1946      if (!AddedKill.count(LI)) {
1947        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1948        SlotIndex LastUseIdx = LR->end.getBaseIndex();
1949        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1950        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1951        assert(UseIdx != -1);
1952        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1953          LastUse->getOperand(UseIdx).setIsKill();
1954          vrm.addKillPoint(LI->reg, LastUseIdx);
1955        }
1956      }
1957      RetNewLIs.push_back(LI);
1958    }
1959  }
1960
1961  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1962  return RetNewLIs;
1963}
1964
1965/// hasAllocatableSuperReg - Return true if the specified physical register has
1966/// any super register that's allocatable.
1967bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1968  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1969    if (allocatableRegs_[*AS] && hasInterval(*AS))
1970      return true;
1971  return false;
1972}
1973
1974/// getRepresentativeReg - Find the largest super register of the specified
1975/// physical register.
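/// (Illustrative: on 32-bit x86, the representative of AX would typically
/// be EAX, provided EAX has a live interval and no allocatable
/// super-register of its own.)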
1976unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1977  // Pick a super-register that has an interval but no allocatable super-reg.
1978  unsigned BestReg = Reg;
1979  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1980    unsigned SuperReg = *AS;
1981    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1982      BestReg = SuperReg;
1983      break;
1984    }
1985  }
1986  return BestReg;
1987}
1988
1989/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1990/// specified interval that conflict with the specified physical register.
1991unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1992                                                   unsigned PhysReg) const {
1993  unsigned NumConflicts = 0;
1994  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1995  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1996         E = mri_->reg_end(); I != E; ++I) {
1997    MachineOperand &O = I.getOperand();
1998    MachineInstr *MI = O.getParent();
1999    SlotIndex Index = getInstructionIndex(MI);
2000    if (pli.liveAt(Index))
2001      ++NumConflicts;
2002  }
2003  return NumConflicts;
2004}
2005
2006/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2007/// around all defs and uses of the specified interval. Return true if it
2008/// was able to cut its interval.
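/// (Sketch of the effect: for each instruction of li where PhysReg is live,
/// an emergency spill is recorded so the spiller can save and reload
/// PhysReg around that instruction, and the slice of PhysReg's interval
/// covering it is removed.)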
2009bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2010                                            unsigned PhysReg, VirtRegMap &vrm) {
2011  unsigned SpillReg = getRepresentativeReg(PhysReg);
2012
2013  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2014    // If a register aliases PhysReg but is not a sub-register of the
2015    // chosen representative super-register, assert, since we cannot
2016    // handle that case yet.
2017    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2018           tri_->isSuperRegister(*AS, SpillReg));
2019
2020  bool Cut = false;
2021  SmallVector<unsigned, 4> PRegs;
2022  if (hasInterval(SpillReg))
2023    PRegs.push_back(SpillReg);
2024  else {
2025    SmallSet<unsigned, 4> Added;
2026    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2027      if (Added.insert(*AS) && hasInterval(*AS)) {
2028        PRegs.push_back(*AS);
2029        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2030          Added.insert(*ASS);
2031      }
2032  }
2033
2034  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2035  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2036         E = mri_->reg_end(); I != E; ++I) {
2037    MachineOperand &O = I.getOperand();
2038    MachineInstr *MI = O.getParent();
2039    if (SeenMIs.count(MI))
2040      continue;
2041    SeenMIs.insert(MI);
2042    SlotIndex Index = getInstructionIndex(MI);
2043    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2044      unsigned PReg = PRegs[i];
2045      LiveInterval &pli = getInterval(PReg);
2046      if (!pli.liveAt(Index))
2047        continue;
2048      vrm.addEmergencySpill(PReg, MI);
2049      SlotIndex StartIdx = Index.getLoadIndex();
2050      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2051      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2052        pli.removeRange(StartIdx, EndIdx);
2053        Cut = true;
2054      } else {
2055        std::string msg;
2056        raw_string_ostream Msg(msg);
2057        Msg << "Ran out of registers during register allocation!";
2058        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2059          Msg << "\nPlease check your inline asm statement for invalid "
2060              << "constraints:\n";
2061          MI->print(Msg, tm_);
2062        }
2063        llvm_report_error(Msg.str());
2064      }
2065      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2066        if (!hasInterval(*AS))
2067          continue;
2068        LiveInterval &spli = getInterval(*AS);
2069        if (spli.liveAt(Index))
2070          spli.removeRange(Index.getLoadIndex(),
2071                           Index.getNextIndex().getBaseIndex());
2072      }
2073    }
2074  }
2075  return Cut;
2076}
2077
2078LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2079                                                  MachineInstr* startInst) {
2080  LiveInterval& Interval = getOrCreateInterval(reg);
2081  VNInfo* VN = Interval.getNextValue(
2082    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2083    startInst, true, getVNInfoAllocator());
2084  VN->setHasPHIKill(true);
2085  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2086  LiveRange LR(
2087     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2088     getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2089  Interval.addRange(LR);
2090
2091  return LR;
2092}
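// Hypothetical usage sketch (callers live outside this file): a pass that
// inserts a def and needs the value live out of its block might do
//   LiveRange LR = LIs->addLiveRangeToEndOfBlock(Reg, DefMI);
// which creates a new value number at DefMI's def slot, marks it as having
// a PHI kill at the terminator gap, and extends Reg's interval to the end
// of DefMI's parent block.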
2093
2094