LiveIntervalAnalysis.cpp revision ac94863a1c090f2221ff2e21b7ee5480bd1db594
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
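// As an illustrative example of the notation used below: an interval printed
// as %reg1024 = [20,46:0)  0@20 says %reg1024 is live over the slot-index
// range [20,46) with value number 0, defined at index 20.
//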
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
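  // Rounding both endpoints to base (whole-instruction) indices lets the
  // loops below walk the range one instruction at a time; 'end' is the first
  // base index past the last instruction covered by the range.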

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
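  // For instance (illustrative pseudo-machine-code), two-address conversion
  // rewrites
  //   %reg1026 = ADD %reg1024, %reg1025
  // into
  //   %reg1026 = MOV %reg1024
  //   %reg1026 = ADD %reg1026, %reg1025
  // so the same vreg is defined twice and reaches this function twice.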
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(Killer->getParent()), true),
                       0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;
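  // Note: each instruction index has four sub-slots (load, use, def, store,
  // in that order), which is how a def and a kill at the same instruction
  // index can still be ordered relative to each other.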

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is that the implicit use of the
  // physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills. If the register reaches a def before it's killed, then it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

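  // No instruction in this block defines the live-in value, so model it as
  // defined at the block start and mark it PHI-defined: the value merges in
  // from predecessors rather than from a real def.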
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) with 1 <= i <= j < N over
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
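    // Splice the folded instruction into the block in place of the old MI,
    // and point the caller's MI reference at it.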
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
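        // The new interval covers this instruction's load slot up to (but
        // not including) its def slot, i.e. the span a reload would occupy.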
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
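  // As in conflictsWithPhysReg, round the endpoints to whole-instruction
  // (base) indices so defs and uses are compared at instruction granularity.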

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: both could
      // be reloaded before the INSERT_SUBREG, and the two reloaded registers
      // would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
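    // NumUses counts this first entry if it is a use; the loop below folds
    // in the remaining entries for the same MI.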
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
1415          // If this is two-address code, then this index starts a new VNInfo.
1416          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1417          if (VNI)
1418            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1419        }
1420        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1421          SpillIdxes.find(MBBId);
1422        if (!HasKill) {
1423          if (SII == SpillIdxes.end()) {
1424            std::vector<SRInfo> S;
1425            S.push_back(SRInfo(index, NewVReg, true));
1426            SpillIdxes.insert(std::make_pair(MBBId, S));
1427          } else if (SII->second.back().vreg != NewVReg) {
1428            SII->second.push_back(SRInfo(index, NewVReg, true));
1429          } else if (index > SII->second.back().index) {
1430            // If there is an earlier def and this is a two-address
1431            // instruction, then it's not possible to fold the store (which
1432            // would also fold the load).
1433            SRInfo &Info = SII->second.back();
1434            Info.index = index;
1435            Info.canFold = !HasUse;
1436          }
1437          SpillMBBs.set(MBBId);
1438        } else if (SII != SpillIdxes.end() &&
1439                   SII->second.back().vreg == NewVReg &&
1440                   index > SII->second.back().index) {
1441          // There is an earlier def that's not killed (must be two-address).
1442          // The spill is no longer needed.
1443          SII->second.pop_back();
1444          if (SII->second.empty()) {
1445            SpillIdxes.erase(MBBId);
1446            SpillMBBs.reset(MBBId);
1447          }
1448        }
1449      }
1450    }
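    // In effect, only the spill after the last def of NewVReg in this MBB is
    // kept: an earlier queued spill is either overwritten (and marked
    // non-foldable when the redefining instruction also uses the register)
    // or dropped entirely once a kill follows the later def.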
1451
1452    if (HasUse) {
1453      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1454        SpillIdxes.find(MBBId);
1455      if (SII != SpillIdxes.end() &&
1456          SII->second.back().vreg == NewVReg &&
1457          index > SII->second.back().index)
1458        // Use(s) follow the last def, so it's not safe to fold the spill.
1459        SII->second.back().canFold = false;
1460      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1461        RestoreIdxes.find(MBBId);
1462      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1463        // If we are splitting live intervals, only fold if it's the first
1464        // use and there isn't another use later in the MBB.
1465        RII->second.back().canFold = false;
1466      else if (IsNew) {
1467        // Only need a reload if there isn't an earlier def / use.
1468        if (RII == RestoreIdxes.end()) {
1469          std::vector<SRInfo> Infos;
1470          Infos.push_back(SRInfo(index, NewVReg, true));
1471          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1472        } else {
1473          RII->second.push_back(SRInfo(index, NewVReg, true));
1474        }
1475        RestoreMBBs.set(MBBId);
1476      }
1477    }
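    // So a reload is only queued for the first use of NewVReg in this MBB;
    // any later use simply marks the pending restore and spill entries as
    // non-foldable, since a fold would cover only a single use.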
1478
1479    // Update spill weight.
1480    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1481    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1482  }
1483
1484  if (NewVReg && TrySplit && AllCanFold) {
1485    // If all of its defs / uses can be folded, give it a low spill weight.
1486    LiveInterval &nI = getOrCreateInterval(NewVReg);
1487    nI.weight /= 10.0F;
1488  }
1489}
1490
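/// alsoFoldARestore - Return true if the restore recorded for register vr at
/// index in block Id is still marked foldable, i.e. folding the store could
/// also fold away the matching reload.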
1491bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1492                        unsigned vr, BitVector &RestoreMBBs,
1493                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1494  if (!RestoreMBBs[Id])
1495    return false;
1496  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1497  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1498    if (Restores[i].index == index &&
1499        Restores[i].vreg == vr &&
1500        Restores[i].canFold)
1501      return true;
1502  return false;
1503}
1504
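/// eraseRestoreInfo - Invalidate the restore recorded for register vr at
/// index in block Id by clearing its SlotIndex; the restore loop below skips
/// entries whose index has been cleared this way.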
1505void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1506                        unsigned vr, BitVector &RestoreMBBs,
1507                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1508  if (!RestoreMBBs[Id])
1509    return;
1510  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1511  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1512    if (Restores[i].index == index && Restores[i].vreg == vr)
1513      Restores[i].index = SlotIndex();
1514}
1515
1516/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1517/// spilled and create empty intervals for their uses.
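/// For example:
///   %reg1024<def> = IMPLICIT_DEF
///   ...           = use %reg1024
/// The IMPLICIT_DEF is erased, and the use is rewritten to a fresh vreg with
/// an empty interval and an <undef> marker on the operand.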
1518void
1519LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1520                                    const TargetRegisterClass* rc,
1521                                    std::vector<LiveInterval*> &NewLIs) {
1522  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1523         re = mri_->reg_end(); ri != re; ) {
1524    MachineOperand &O = ri.getOperand();
1525    MachineInstr *MI = &*ri;
1526    ++ri;
1527    if (O.isDef()) {
1528      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1529             "Register def was not rewritten?");
1530      RemoveMachineInstrFromMaps(MI);
1531      vrm.RemoveMachineInstrFromMaps(MI);
1532      MI->eraseFromParent();
1533    } else {
1534      // This must be a use of an implicit_def so it's not part of the live
1535      // interval. Create a new empty live interval for it.
1536      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1537      unsigned NewVReg = mri_->createVirtualRegister(rc);
1538      vrm.grow();
1539      vrm.setIsImplicitlyDefined(NewVReg);
1540      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1541      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1542        MachineOperand &MO = MI->getOperand(i);
1543        if (MO.isReg() && MO.getReg() == li.reg) {
1544          MO.setReg(NewVReg);
1545          MO.setIsUndef();
1546        }
1547      }
1548    }
1549  }
1550}
1551
1552std::vector<LiveInterval*> LiveIntervals::
1553addIntervalsForSpillsFast(const LiveInterval &li,
1554                          const MachineLoopInfo *loopInfo,
1555                          VirtRegMap &vrm) {
1556  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1557
1558  std::vector<LiveInterval*> added;
1559
1560  assert(li.weight != HUGE_VALF &&
1561         "attempt to spill already spilled interval!");
1562
1563  DEBUG({
1564      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1565      li.dump();
1566      errs() << '\n';
1567    });
1568
1569  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1570
1571  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1572  while (RI != mri_->reg_end()) {
1573    MachineInstr* MI = &*RI;
1574
1575    SmallVector<unsigned, 2> Indices;
1576    bool HasUse = false;
1577    bool HasDef = false;
1578
1579    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1580      MachineOperand& mop = MI->getOperand(i);
1581      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1582
1583      HasUse |= MI->getOperand(i).isUse();
1584      HasDef |= MI->getOperand(i).isDef();
1585
1586      Indices.push_back(i);
1587    }
1588
1589    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1590                              Indices, true, slot, li.reg)) {
1591      unsigned NewVReg = mri_->createVirtualRegister(rc);
1592      vrm.grow();
1593      vrm.assignVirt2StackSlot(NewVReg, slot);
1594
1595      // Create a new live interval for this spill.
1596      LiveInterval &nI = getOrCreateInterval(NewVReg);
1597
1598      // The spill weight is now infinity as it
1599      // cannot be spilled again.
1600      nI.weight = HUGE_VALF;
1601
1602      // Rewrite register operands to use the new vreg.
1603      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1604           E = Indices.end(); I != E; ++I) {
1605        MI->getOperand(*I).setReg(NewVReg);
1606
1607        if (MI->getOperand(*I).isUse())
1608          MI->getOperand(*I).setIsKill(true);
1609      }
1610
1611      // Fill in the new live interval.
1612      SlotIndex index = getInstructionIndex(MI);
1613      if (HasUse) {
1614        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1615                     nI.getNextValue(SlotIndex(), 0, false,
1616                                     getVNInfoAllocator()));
1617        DEBUG(errs() << " +" << LR);
1618        nI.addRange(LR);
1619        vrm.addRestorePoint(NewVReg, MI);
1620      }
1621      if (HasDef) {
1622        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1623                     nI.getNextValue(SlotIndex(), 0, false,
1624                                     getVNInfoAllocator()));
1625        DEBUG(errs() << " +" << LR);
1626        nI.addRange(LR);
1627        vrm.addSpillPoint(NewVReg, true, MI);
1628      }
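      // Note the slot sub-indices used above: a reload covers
      // [LoadIndex, UseIndex) and a spill covers [DefIndex, StoreIndex), so
      // the new interval is live only across this single instruction.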
1629
1630      added.push_back(&nI);
1631
1632      DEBUG({
1633          errs() << "\t\t\t\tadded new interval: ";
1634          nI.dump();
1635          errs() << '\n';
1636        });
1637    }
1638
1639
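    // Start over from the first remaining use of li.reg: MI's operands were
    // either folded away or rewritten to NewVReg above, so li.reg's use list
    // has shrunk and the old iterator is no longer valid.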
1640    RI = mri_->reg_begin(li.reg);
1641  }
1642
1643  return added;
1644}
1645
1646std::vector<LiveInterval*> LiveIntervals::
1647addIntervalsForSpills(const LiveInterval &li,
1648                      SmallVectorImpl<LiveInterval*> &SpillIs,
1649                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1650
1651  if (EnableFastSpilling)
1652    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1653
1654  assert(li.weight != HUGE_VALF &&
1655         "attempt to spill already spilled interval!");
1656
1657  DEBUG({
1658      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1659      li.print(errs(), tri_);
1660      errs() << '\n';
1661    });
1662
1663  // Each bit specifies whether a spill is required in the corresponding MBB.
1664  BitVector SpillMBBs(mf_->getNumBlockIDs());
1665  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1666  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1667  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1668  DenseMap<unsigned,unsigned> MBBVRegsMap;
1669  std::vector<LiveInterval*> NewLIs;
1670  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1671
1672  unsigned NumValNums = li.getNumValNums();
1673  SmallVector<MachineInstr*, 4> ReMatDefs;
1674  ReMatDefs.resize(NumValNums, NULL);
1675  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1676  ReMatOrigDefs.resize(NumValNums, NULL);
1677  SmallVector<int, 4> ReMatIds;
1678  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1679  BitVector ReMatDelete(NumValNums);
1680  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1681
1682  // Spilling a split live interval. It cannot be split any further. Also,
1683  // it's guaranteed to be a single val# / range interval.
1684  if (vrm.getPreSplitReg(li.reg)) {
1685    vrm.setIsSplitFromReg(li.reg, 0);
1686    // Unset the split kill marker on the last use.
1687    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1688    if (KillIdx != SlotIndex()) {
1689      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1690      assert(KillMI && "Last use disappeared?");
1691      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1692      assert(KillOp != -1 && "Last use disappeared?");
1693      KillMI->getOperand(KillOp).setIsKill(false);
1694    }
1695    vrm.removeKillPoint(li.reg);
1696    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1697    Slot = vrm.getStackSlot(li.reg);
1698    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1699    MachineInstr *ReMatDefMI = DefIsReMat ?
1700      vrm.getReMaterializedMI(li.reg) : NULL;
1701    int LdSlot = 0;
1702    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1703    bool isLoad = isLoadSS ||
1704      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1705    bool IsFirstRange = true;
1706    for (LiveInterval::Ranges::const_iterator
1707           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1708      // If this is a split live interval with multiple ranges, it means there
1709      // are two-address instructions that re-defined the value. Only the
1710      // first def can be rematerialized!
1711      if (IsFirstRange) {
1712        // Note ReMatOrigDefMI has already been deleted.
1713        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1714                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1715                             false, vrm, rc, ReMatIds, loopInfo,
1716                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1717                             MBBVRegsMap, NewLIs);
1718      } else {
1719        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1720                             Slot, 0, false, false, false,
1721                             false, vrm, rc, ReMatIds, loopInfo,
1722                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1723                             MBBVRegsMap, NewLIs);
1724      }
1725      IsFirstRange = false;
1726    }
1727
1728    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1729    return NewLIs;
1730  }
1731
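  // Splitting is only attempted for intervals that span more than one basic
  // block; an interval local to a single MBB is simply spilled.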
1732  bool TrySplit = !intervalIsInOneMBB(li);
1733  if (TrySplit)
1734    ++numSplits;
1735  bool NeedStackSlot = false;
1736  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1737       i != e; ++i) {
1738    const VNInfo *VNI = *i;
1739    unsigned VN = VNI->id;
1740    if (VNI->isUnused())
1741      continue; // Dead val#.
1742    // Is the def for the val# rematerializable?
1743    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1744      ? getInstructionFromIndex(VNI->def) : 0;
1745    bool dummy;
1746    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1747      // Remember how to remat the def of this val#.
1748      ReMatOrigDefs[VN] = ReMatDefMI;
1749      // Original def may be modified so we have to make a copy here.
1750      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1751      CloneMIs.push_back(Clone);
1752      ReMatDefs[VN] = Clone;
1753
1754      bool CanDelete = true;
1755      if (VNI->hasPHIKill()) {
1756        // A kill is a phi node; not all of its uses can be rematerialized.
1757        // It must not be deleted.
1758        CanDelete = false;
1759        // Need a stack slot if there is any live range where uses cannot be
1760        // rematerialized.
1761        NeedStackSlot = true;
1762      }
1763      if (CanDelete)
1764        ReMatDelete.set(VN);
1765    } else {
1766      // Need a stack slot if there is any live range where uses cannot be
1767      // rematerialized.
1768      NeedStackSlot = true;
1769    }
1770  }
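  // At this point ReMatDefs / ReMatOrigDefs record, for each val#, how its
  // def can be rematerialized (if at all), ReMatDelete marks defs that can
  // be deleted outright, and NeedStackSlot says whether any value must
  // still be kept on the stack.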
1771
1772  // One stack slot per live interval.
1773  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1774    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1775      Slot = vrm.assignVirt2StackSlot(li.reg);
1776
1777    // This case only occurs when the prealloc splitter has already assigned
1778    // a stack slot to this vreg.
1779    else
1780      Slot = vrm.getStackSlot(li.reg);
1781  }
1782
1783  // Create new intervals and rewrite defs and uses.
1784  for (LiveInterval::Ranges::const_iterator
1785         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1786    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1787    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1788    bool DefIsReMat = ReMatDefMI != NULL;
1789    bool CanDelete = ReMatDelete[I->valno->id];
1790    int LdSlot = 0;
1791    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1792    bool isLoad = isLoadSS ||
1793      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1794    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1795                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1796                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1797                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1798                               MBBVRegsMap, NewLIs);
1799  }
1800
1801  // Insert spills / restores if we are splitting.
1802  if (!TrySplit) {
1803    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1804    return NewLIs;
1805  }
1806
1807  SmallPtrSet<LiveInterval*, 4> AddedKill;
1808  SmallVector<unsigned, 2> Ops;
1809  if (NeedStackSlot) {
1810    int Id = SpillMBBs.find_first();
1811    while (Id != -1) {
1812      std::vector<SRInfo> &spills = SpillIdxes[Id];
1813      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1814        SlotIndex index = spills[i].index;
1815        unsigned VReg = spills[i].vreg;
1816        LiveInterval &nI = getOrCreateInterval(VReg);
1817        bool isReMat = vrm.isReMaterialized(VReg);
1818        MachineInstr *MI = getInstructionFromIndex(index);
1819        bool CanFold = false;
1820        bool FoundUse = false;
1821        Ops.clear();
1822        if (spills[i].canFold) {
1823          CanFold = true;
1824          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1825            MachineOperand &MO = MI->getOperand(j);
1826            if (!MO.isReg() || MO.getReg() != VReg)
1827              continue;
1828
1829            Ops.push_back(j);
1830            if (MO.isDef())
1831              continue;
1832            if (isReMat ||
1833                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1834                                                RestoreMBBs, RestoreIdxes))) {
1835              // MI has two-address uses of the same register. If the use
1836              // isn't the first and only use in the BB, then we can't fold
1837              // it. FIXME: Move this to rewriteInstructionsForSpills.
1838              CanFold = false;
1839              break;
1840            }
1841            FoundUse = true;
1842          }
1843        }
1844        // Fold the store into the def if possible.
1845        bool Folded = false;
1846        if (CanFold && !Ops.empty()) {
1847          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1848            Folded = true;
1849            if (FoundUse) {
1850              // Also folded uses, do not issue a load.
1851              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1852              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1853            }
1854            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1855          }
1856        }
1857
1858        // Otherwise tell the spiller to issue a spill.
1859        if (!Folded) {
1860          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1861          bool isKill = LR->end == index.getStoreIndex();
1862          if (!MI->registerDefIsDead(nI.reg))
1863            // No need to spill a dead def.
1864            vrm.addSpillPoint(VReg, isKill, MI);
1865          if (isKill)
1866            AddedKill.insert(&nI);
1867        }
1868      }
1869      Id = SpillMBBs.find_next(Id);
1870    }
1871  }
1872
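  // Now issue the restores: for each recorded restore point, try to fold the
  // reload (or the rematerialized def, when it is itself foldable as a load)
  // into the using instruction; otherwise ask the spiller to emit an
  // explicit reload or rematerialization.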
1873  int Id = RestoreMBBs.find_first();
1874  while (Id != -1) {
1875    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1876    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1877      SlotIndex index = restores[i].index;
1878      if (index == SlotIndex())
1879        continue;
1880      unsigned VReg = restores[i].vreg;
1881      LiveInterval &nI = getOrCreateInterval(VReg);
1882      bool isReMat = vrm.isReMaterialized(VReg);
1883      MachineInstr *MI = getInstructionFromIndex(index);
1884      bool CanFold = false;
1885      Ops.clear();
1886      if (restores[i].canFold) {
1887        CanFold = true;
1888        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1889          MachineOperand &MO = MI->getOperand(j);
1890          if (!MO.isReg() || MO.getReg() != VReg)
1891            continue;
1892
1893          if (MO.isDef()) {
1894            // If this restore were to be folded, it would have been folded
1895            // already.
1896            CanFold = false;
1897            break;
1898          }
1899          Ops.push_back(j);
1900        }
1901      }
1902
1903      // Fold the load into the use if possible.
1904      bool Folded = false;
1905      if (CanFold && !Ops.empty()) {
1906        if (!isReMat)
1907          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1908        else {
1909          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1910          int LdSlot = 0;
1911          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1912          // If the rematerializable def is a load, also try to fold it.
1913          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1914            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1915                                          Ops, isLoadSS, LdSlot, VReg);
1916          if (!Folded) {
1917            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1918            if (ImpUse) {
1919              // Re-matting an instruction with a virtual register use. Add the
1920              // register as an implicit use on the use MI and update the register
1921              // interval's spill weight to HUGE_VALF to prevent it from being
1922              // spilled.
1923              LiveInterval &ImpLi = getInterval(ImpUse);
1924              ImpLi.weight = HUGE_VALF;
1925              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1926            }
1927          }
1928        }
1929      }
1930      // If folding was not possible or it failed, tell the spiller to issue
1931      // a load / rematerialization for us.
1932      if (Folded)
1933        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1934      else
1935        vrm.addRestorePoint(VReg, MI);
1936    }
1937    Id = RestoreMBBs.find_next(Id);
1938  }
1939
1940  // Finalize intervals: add kills, finalize spill weights, and filter out
1941  // dead intervals.
1942  std::vector<LiveInterval*> RetNewLIs;
1943  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1944    LiveInterval *LI = NewLIs[i];
1945    if (!LI->empty()) {
1946      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1947      if (!AddedKill.count(LI)) {
1948        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1949        SlotIndex LastUseIdx = LR->end.getBaseIndex();
1950        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1951        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1952        assert(UseIdx != -1);
1953        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1954          LastUse->getOperand(UseIdx).setIsKill();
1955          vrm.addKillPoint(LI->reg, LastUseIdx);
1956        }
1957      }
1958      RetNewLIs.push_back(LI);
1959    }
1960  }
1961
1962  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1963  return RetNewLIs;
1964}
1965
1966/// hasAllocatableSuperReg - Return true if the specified physical register has
1967/// any super register that's allocatable.
1968bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1969  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1970    if (allocatableRegs_[*AS] && hasInterval(*AS))
1971      return true;
1972  return false;
1973}
1974
1975/// getRepresentativeReg - Find the largest super register of the specified
1976/// physical register.
1977unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1978  // Find the largest super-register that is allocatable.
1979  unsigned BestReg = Reg;
1980  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1981    unsigned SuperReg = *AS;
1982    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1983      BestReg = SuperReg;
1984      break;
1985    }
1986  }
1987  return BestReg;
1988}
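// For example, on X86 getRepresentativeReg(AL) would return EAX, assuming
// EAX is allocatable and has a live interval: spilling the widest register
// covers all of its aliases at once.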
1989
1990/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1991/// specified interval that conflict with the specified physical register.
1992unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1993                                                   unsigned PhysReg) const {
1994  unsigned NumConflicts = 0;
1995  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1996  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1997         E = mri_->reg_end(); I != E; ++I) {
1998    MachineOperand &O = I.getOperand();
1999    MachineInstr *MI = O.getParent();
2000    SlotIndex Index = getInstructionIndex(MI);
2001    if (pli.liveAt(Index))
2002      ++NumConflicts;
2003  }
2004  return NumConflicts;
2005}
2006
2007/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2008/// around all defs and uses of the specified interval. Return true if it
2009/// was able to cut its interval.
2010bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2011                                            unsigned PhysReg, VirtRegMap &vrm) {
2012  unsigned SpillReg = getRepresentativeReg(PhysReg);
2013
2014  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2015    // If there are registers which alias PhysReg but are not sub-registers
2016    // of the chosen representative super register, assert, since we can't
2017    // handle that case yet.
2018    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2019           tri_->isSuperRegister(*AS, SpillReg));
2020
2021  bool Cut = false;
2022  SmallVector<unsigned, 4> PRegs;
2023  if (hasInterval(SpillReg))
2024    PRegs.push_back(SpillReg);
2025  else {
2026    SmallSet<unsigned, 4> Added;
2027    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2028      if (Added.insert(*AS) && hasInterval(*AS)) {
2029        PRegs.push_back(*AS);
2030        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2031          Added.insert(*ASS);
2032      }
2033  }
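  // PRegs now holds SpillReg itself, or else the outermost sub-registers of
  // SpillReg that have intervals; each of these is spilled around every
  // instruction where li and the physical register are both live.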
2034
2035  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2036  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2037         E = mri_->reg_end(); I != E; ++I) {
2038    MachineOperand &O = I.getOperand();
2039    MachineInstr *MI = O.getParent();
2040    if (SeenMIs.count(MI))
2041      continue;
2042    SeenMIs.insert(MI);
2043    SlotIndex Index = getInstructionIndex(MI);
2044    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2045      unsigned PReg = PRegs[i];
2046      LiveInterval &pli = getInterval(PReg);
2047      if (!pli.liveAt(Index))
2048        continue;
2049      vrm.addEmergencySpill(PReg, MI);
2050      SlotIndex StartIdx = Index.getLoadIndex();
2051      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2052      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2053        pli.removeRange(StartIdx, EndIdx);
2054        Cut = true;
2055      } else {
2056        std::string msg;
2057        raw_string_ostream Msg(msg);
2058        Msg << "Ran out of registers during register allocation!";
2059        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2060          Msg << "\nPlease check your inline asm statement for invalid "
2061              << "constraints:\n";
2062          MI->print(Msg, tm_);
2063        }
2064        llvm_report_error(Msg.str());
2065      }
2066      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2067        if (!hasInterval(*AS))
2068          continue;
2069        LiveInterval &spli = getInterval(*AS);
2070        if (spli.liveAt(Index))
2071          spli.removeRange(Index.getLoadIndex(),
2072                           Index.getNextIndex().getBaseIndex());
2073      }
2074    }
2075  }
2076  return Cut;
2077}
2078
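/// addLiveRangeToEndOfBlock - Make reg live from startInst's def to the end
/// of startInst's basic block, creating a new value number that is marked as
/// having a PHI kill at the block's terminator gap.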
2079LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2080                                                  MachineInstr* startInst) {
2081  LiveInterval& Interval = getOrCreateInterval(reg);
2082  VNInfo* VN = Interval.getNextValue(
2083    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2084    startInst, true, getVNInfoAllocator());
2085  VN->setHasPHIKill(true);
2086  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2087  LiveRange LR(
2088     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2089     getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2090  Interval.addRange(LR);
2091
2092  return LR;
2093}
2094
2095