LiveIntervalAnalysis.cpp revision a24986d8bfd941252f7d080943e02bbe6a0c2944
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
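//
// Notation sketch (illustrative only -- the indexes, opcodes and register
// numbers below are invented): live ranges are half-open SlotIndex
// intervals. Given
//
//   4    %reg1024<def> = MOV32ri 7
//   8    %EAX<def> = MOV32rr %reg1024<kill>
//
// %reg1024 gets the interval [4d, 8u): live from its def slot at index 4
// up to (but not including) the use slot of the move that kills it.
//
//===----------------------------------------------------------------------===//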

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
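  // The computation above widens the slot-precise range to instruction
  // granularity. Illustrative sketch (made-up numbers; each instruction gets
  // one base index with load/use/def/store sub-slots): for a range
  // [20d, 36u), the previous slot of 36u is the load slot of index 36, which
  // rounds down to base index 36, so end becomes 40 and the walk below
  // visits base indexes 20 through 36 -- including the instruction whose
  // use terminates the range.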

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             MO.getSubReg());
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
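/// For example (an illustrative sketch; register and sub-register indexes
/// are made up), a second def such as
///   %reg1041:5<def> = ...
/// following an earlier def of %reg1041:6 rewrites only part of the
/// register, so the old value is still partially live across it.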
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
      CopyMI = mi;

      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
      // implicit defs without really knowing. It shows up as INSERT_SUBREG
      // using an undefined register.
      if (mi->isInsertSubreg())
        mi->getOperand(1).setIsUndef();
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, is live across some number of blocks, and then
    // gets killed.  Start by adding a range that goes from this definition
    // to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create the range with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80   %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120  %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef &&
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction (i.e. where we know it is dead) is when it is live-in to the
  // function and never used. Another possible case is that the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
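///
/// For example (an illustrative sketch -- the indexes, opcodes and register
/// numbers below are invented), given the linearized code:
///
///   4    %reg1024<def> = MOV32ri 7
///   8    %reg1025<def> = MOV32rr %reg1024<kill>
///   12   %EAX<def> = MOV32rr %reg1025<kill>
///
/// this computes roughly %reg1024 = [4d, 8u):0 and %reg1025 = [8d, 12u):0,
/// where the trailing :N names the value number.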
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifdef NDEBUG
    // In release builds stop at the first register operand; in debug builds
    // keep scanning so the assert above can catch a second one.
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
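  //
  // For example (illustrative, X86-flavored; the operands are made up): a
  // constant-pool load such as
  //   %reg1024<def> = MOV32rm %reg1025, 1, %reg0, <cp#0>, %reg0
  // with %reg1025 holding the PIC base is only safe to rematerialize at
  // points where the value of %reg1025 that MI reads is still available.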
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the defining MI of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
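/// For example (illustrative): in a two-address instruction such as
///   %reg1024<def> = ADD32ri %reg1024, 10
/// the use of %reg1024, being tied to the def, is not pushed onto FoldOps;
/// it is recorded via MRInfo = isModRef and rewritten together with the def.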
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot, or a rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and the function returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
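  //
  // Illustrative sketch (register numbers and opcode invented): if %reg1026
  // is remat'ed and its defining instruction reads li.reg,
  //   %reg1026<def> = LEA32r %reg1024, ...
  // then once li.reg (%reg1024 here) is renamed to NewVReg below, the cached
  // rematerialized instruction must be updated to read NewVReg as well.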
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because an
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    assert(getInstructionFromIndex(KillIdx) && "Dangling kill");
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
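  // (As in conflictsWithPhysReg above, start/end are the slot-precise live
  // range rounded out to instruction-granular bounds, so plain instruction
  // indexes can be compared against them below.)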
1326
1327  // First collect all the def / use in this live range that will be rewritten.
1328  // Make sure they are sorted according to instruction index.
1329  std::vector<RewriteInfo> RewriteMIs;
1330  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1331         re = mri_->reg_end(); ri != re; ) {
1332    MachineInstr *MI = &*ri;
1333    MachineOperand &O = ri.getOperand();
1334    ++ri;
1335    if (MI->isDebugValue()) {
1336      // Modify DBG_VALUE now that the value is in a spill slot.
1337      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1338        uint64_t Offset = MI->getOperand(1).getImm();
1339        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1340        DebugLoc DL = MI->getDebugLoc();
1341        int FI = isLoadSS ? LdSlot : (int)Slot;
1342        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1343                                                           Offset, MDPtr, DL)) {
1344          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1345          ReplaceMachineInstrInMaps(MI, NewDV);
1346          MachineBasicBlock *MBB = MI->getParent();
1347          MBB->insert(MBB->erase(MI), NewDV);
1348          continue;
1349        }
1350      }
1351
1352      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1353      RemoveMachineInstrFromMaps(MI);
1354      vrm.RemoveMachineInstrFromMaps(MI);
1355      MI->eraseFromParent();
1356      continue;
1357    }
1358    assert(!(O.isImplicit() && O.isUse()) &&
1359           "Spilling register that's used as implicit use?");
1360    SlotIndex index = getInstructionIndex(MI);
1361    if (index < start || index >= end)
1362      continue;
1363
1364    if (O.isUndef())
1365      // Must be defined by an implicit def. It should not be spilled. Note,
1366      // this is for correctness reason. e.g.
1367      // 8   %reg1024<def> = IMPLICIT_DEF
1368      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1369      // The live range [12, 14) are not part of the r1024 live interval since
1370      // it's defined by an implicit def. It will not conflicts with live
1371      // interval of r1025. Now suppose both registers are spilled, you can
1372      // easily see a situation where both registers are reloaded before
1373      // the INSERT_SUBREG and both target registers that would overlap.
1374      continue;
1375    RewriteMIs.push_back(RewriteInfo(index, MI));
1376  }
1377  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1378
1379  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1380  // Now rewrite the defs and uses.
1381  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1382    RewriteInfo &rwi = RewriteMIs[i];
1383    ++i;
1384    SlotIndex index = rwi.Index;
1385    MachineInstr *MI = rwi.MI;
1386    // If MI def and/or use the same register multiple times, then there
1387    // are multiple entries.
1388    while (i != e && RewriteMIs[i].MI == MI) {
1389      assert(RewriteMIs[i].Index == index);
1390      ++i;
1391    }
1392    MachineBasicBlock *MBB = MI->getParent();
1393
1394    if (ImpUse && MI != ReMatDefMI) {
1395      // Re-matting an instruction with virtual register use. Prevent interval
1396      // from being spilled.
1397      getInterval(ImpUse).markNotSpillable();
1398    }
1399
1400    unsigned MBBId = MBB->getNumber();
1401    unsigned ThisVReg = 0;
1402    if (TrySplit) {
1403      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1404      if (NVI != MBBVRegsMap.end()) {
1405        ThisVReg = NVI->second;
1406        // One common case:
1407        // x = use
1408        // ...
1409        // ...
1410        // def = ...
1411        //     = use
1412        // It's better to start a new interval to avoid artificially
1413        // extending the new interval.
1414        if (MI->readsWritesVirtualRegister(li.reg) ==
1415            std::make_pair(false,true)) {
1416          MBBVRegsMap.erase(MBB->getNumber());
1417          ThisVReg = 0;
1418        }
1419      }
1420    }
1421
1422    bool IsNew = ThisVReg == 0;
1423    if (IsNew) {
1424      // This ends the previous live interval. If all of its def / use
1425      // can be folded, give it a low spill weight.
1426      if (NewVReg && TrySplit && AllCanFold) {
1427        LiveInterval &nI = getOrCreateInterval(NewVReg);
1428        nI.weight /= 10.0F;
1429      }
1430      AllCanFold = true;
1431    }
1432    NewVReg = ThisVReg;
1433
1434    bool HasDef = false;
1435    bool HasUse = false;
1436    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1437                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1438                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1439                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1440                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1441    if (!HasDef && !HasUse)
1442      continue;
1443
1444    AllCanFold &= CanFold;
1445
1446    // Update weight of spill interval.
1447    LiveInterval &nI = getOrCreateInterval(NewVReg);
1448    if (!TrySplit) {
1449      // The spill weight is now infinity as it cannot be spilled again.
1450      nI.markNotSpillable();
1451      continue;
1452    }
1453
1454    // Keep track of the last def and first use in each MBB.
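    // For example (indices illustrative): with defs of NewVReg at 20 and 36
    // in one MBB, only the last def needs a store to the stack slot, so the
    // entry recorded at 20 is either updated to 36 or popped when the value
    // is killed later in the block and no spill is needed at all.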
1455    if (HasDef) {
1456      if (MI != ReMatOrigDefMI || !CanDelete) {
1457        bool HasKill = false;
1458        if (!HasUse)
1459          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1460        else {
1461          // If this is a two-address instruction, this index starts a new VNInfo.
1462          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1463          if (VNI)
1464            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1465        }
1466        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1467          SpillIdxes.find(MBBId);
1468        if (!HasKill) {
1469          if (SII == SpillIdxes.end()) {
1470            std::vector<SRInfo> S;
1471            S.push_back(SRInfo(index, NewVReg, true));
1472            SpillIdxes.insert(std::make_pair(MBBId, S));
1473          } else if (SII->second.back().vreg != NewVReg) {
1474            SII->second.push_back(SRInfo(index, NewVReg, true));
1475          } else if (index > SII->second.back().index) {
1476            // If there is an earlier def and this is a two-address
1477            // instruction, then it's not possible to fold the store (which
1478            // would also fold the load).
1479            SRInfo &Info = SII->second.back();
1480            Info.index = index;
1481            Info.canFold = !HasUse;
1482          }
1483          SpillMBBs.set(MBBId);
1484        } else if (SII != SpillIdxes.end() &&
1485                   SII->second.back().vreg == NewVReg &&
1486                   index > SII->second.back().index) {
1487          // There is an earlier def that's not killed (must be two-address).
1488          // The spill is no longer needed.
1489          SII->second.pop_back();
1490          if (SII->second.empty()) {
1491            SpillIdxes.erase(MBBId);
1492            SpillMBBs.reset(MBBId);
1493          }
1494        }
1495      }
1496    }
1497
1498    if (HasUse) {
1499      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1500        SpillIdxes.find(MBBId);
1501      if (SII != SpillIdxes.end() &&
1502          SII->second.back().vreg == NewVReg &&
1503          index > SII->second.back().index)
1504        // There are use(s) following the last def; it's not safe to fold the spill.
1505        SII->second.back().canFold = false;
1506      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1507        RestoreIdxes.find(MBBId);
1508      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1509        // When splitting, a restore may only be folded into the first use
1510        // in the MBB; this later use makes the recorded restore unfoldable.
1511        RII->second.back().canFold = false;
1512      else if (IsNew) {
1513        // Only need a reload if there isn't an earlier def / use.
1514        if (RII == RestoreIdxes.end()) {
1515          std::vector<SRInfo> Infos;
1516          Infos.push_back(SRInfo(index, NewVReg, true));
1517          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1518        } else {
1519          RII->second.push_back(SRInfo(index, NewVReg, true));
1520        }
1521        RestoreMBBs.set(MBBId);
1522      }
1523    }
1524
1525    // Update spill weight.
1526    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1527    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1528  }
1529
1530  if (NewVReg && TrySplit && AllCanFold) {
1531    // If all of its def / use can be folded, give it a low spill weight.
1532    LiveInterval &nI = getOrCreateInterval(NewVReg);
1533    nI.weight /= 10.0F;
1534  }
1535}
1536
1537bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1538                        unsigned vr, BitVector &RestoreMBBs,
1539                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1540  if (!RestoreMBBs[Id])
1541    return false;
1542  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1543  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1544    if (Restores[i].index == index &&
1545        Restores[i].vreg == vr &&
1546        Restores[i].canFold)
1547      return true;
1548  return false;
1549}
1550
1551void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1552                        unsigned vr, BitVector &RestoreMBBs,
1553                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1554  if (!RestoreMBBs[Id])
1555    return;
1556  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1557  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1558    if (Restores[i].index == index && Restores[i].vreg)
1559      Restores[i].index = SlotIndex();
1560}
1561
1562/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1563/// spilled and create empty intervals for their uses.
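/// For example (vregs illustrative):
///   %reg1024<def> = IMPLICIT_DEF
///            ...  = %reg1024
/// The IMPLICIT_DEF is deleted, and the use is rewritten to a fresh vreg
/// marked <undef> with an empty interval, so no reload is ever generated.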
1564void
1565LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1566                                    const TargetRegisterClass* rc,
1567                                    std::vector<LiveInterval*> &NewLIs) {
1568  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1569         re = mri_->reg_end(); ri != re; ) {
1570    MachineOperand &O = ri.getOperand();
1571    MachineInstr *MI = &*ri;
1572    ++ri;
1573    if (MI->isDebugValue()) {
1574      // Remove debug info for now.
1575      O.setReg(0U);
1576      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1577      continue;
1578    }
1579    if (O.isDef()) {
1580      assert(MI->isImplicitDef() &&
1581             "Register def was not rewritten?");
1582      RemoveMachineInstrFromMaps(MI);
1583      vrm.RemoveMachineInstrFromMaps(MI);
1584      MI->eraseFromParent();
1585    } else {
1586      // This must be a use of an implicit_def, so it's not part of the live
1587      // interval. Create a new empty live interval for it.
1588      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1589      unsigned NewVReg = mri_->createVirtualRegister(rc);
1590      vrm.grow();
1591      vrm.setIsImplicitlyDefined(NewVReg);
1592      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1593      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1594        MachineOperand &MO = MI->getOperand(i);
1595        if (MO.isReg() && MO.getReg() == li.reg) {
1596          MO.setReg(NewVReg);
1597          MO.setIsUndef();
1598        }
1599      }
1600    }
1601  }
1602}
1603
1604float
1605LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1606  // Limit the loop depth ridiculousness.
1607  if (loopDepth > 200)
1608    loopDepth = 200;
1609
1610  // The loop depth is used to roughly estimate the number of times the
1611  // instruction is executed. Something like 10^d is simple, but will quickly
1612  // overflow a float. This expression behaves like 10^d for small d, but is
1613  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1614  // headroom before overflow.
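  // For example: depth 0 gives 1.0, depth 1 gives ~10.1, depth 2 gives ~87,
  // and depth 3 gives ~657, so the weight still grows steeply with nesting.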
1615  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1616
1617  return (isDef + isUse) * lc;
1618}
1619
1620void
1621LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1622  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1623    normalizeSpillWeight(*NewLIs[i]);
1624}
1625
1626std::vector<LiveInterval*> LiveIntervals::
1627addIntervalsForSpillsFast(const LiveInterval &li,
1628                          const MachineLoopInfo *loopInfo,
1629                          VirtRegMap &vrm) {
1630  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1631
1632  std::vector<LiveInterval*> added;
1633
1634  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1635
1636  DEBUG({
1637      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1638      li.dump();
1639      dbgs() << '\n';
1640    });
1641
1642  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1643
1644  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1645  while (RI != mri_->reg_end()) {
1646    MachineInstr* MI = &*RI;
1647
1648    SmallVector<unsigned, 2> Indices;
1649    bool HasUse, HasDef;
1650    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(li.reg, &Indices);
1651
1652    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1653                              Indices, true, slot, li.reg)) {
1654      unsigned NewVReg = mri_->createVirtualRegister(rc);
1655      vrm.grow();
1656      vrm.assignVirt2StackSlot(NewVReg, slot);
1657
1658      // Create a new register for this spill.
1659      LiveInterval &nI = getOrCreateInterval(NewVReg);
1660      nI.markNotSpillable();
1661
1662      // Rewrite register operands to use the new vreg.
1663      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1664           E = Indices.end(); I != E; ++I) {
1665        MI->getOperand(*I).setReg(NewVReg);
1666
1667        if (MI->getOperand(*I).isUse())
1668          MI->getOperand(*I).setIsKill(true);
1669      }
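      // For example (registers and slot illustrative): a use such as
      //   ... = ADD32rr %reg1024, %reg1025
      // is rewritten to a fresh vreg that the spiller will reload:
      //   ... = ADD32rr %reg2048<kill>, %reg1025
      // with a restore point recorded below so the spiller inserts a load
      // from the stack slot before the instruction.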
1670
1671      // Fill in the new live interval.
1672      SlotIndex index = getInstructionIndex(MI);
1673      if (HasUse) {
1674        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1675                     nI.getNextValue(SlotIndex(), 0, false,
1676                                     getVNInfoAllocator()));
1677        DEBUG(dbgs() << " +" << LR);
1678        nI.addRange(LR);
1679        vrm.addRestorePoint(NewVReg, MI);
1680      }
1681      if (HasDef) {
1682        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1683                     nI.getNextValue(SlotIndex(), 0, false,
1684                                     getVNInfoAllocator()));
1685        DEBUG(dbgs() << " +" << LR);
1686        nI.addRange(LR);
1687        vrm.addSpillPoint(NewVReg, true, MI);
1688      }
1689
1690      added.push_back(&nI);
1691
1692      DEBUG({
1693          dbgs() << "\t\t\t\tadded new interval: ";
1694          nI.dump();
1695          dbgs() << '\n';
1696        });
1697    }
1698
1699
1700    RI = mri_->reg_begin(li.reg);
1701  }
1702
1703  return added;
1704}
1705
1706std::vector<LiveInterval*> LiveIntervals::
1707addIntervalsForSpills(const LiveInterval &li,
1708                      SmallVectorImpl<LiveInterval*> &SpillIs,
1709                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1710
1711  if (EnableFastSpilling)
1712    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1713
1714  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1715
1716  DEBUG({
1717      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1718      li.print(dbgs(), tri_);
1719      dbgs() << '\n';
1720    });
1721
1722  // Each bit specifies whether a spill is required in the corresponding MBB.
1723  BitVector SpillMBBs(mf_->getNumBlockIDs());
1724  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1725  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1726  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1727  DenseMap<unsigned,unsigned> MBBVRegsMap;
1728  std::vector<LiveInterval*> NewLIs;
1729  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1730
1731  unsigned NumValNums = li.getNumValNums();
1732  SmallVector<MachineInstr*, 4> ReMatDefs;
1733  ReMatDefs.resize(NumValNums, NULL);
1734  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1735  ReMatOrigDefs.resize(NumValNums, NULL);
1736  SmallVector<int, 4> ReMatIds;
1737  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1738  BitVector ReMatDelete(NumValNums);
1739  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1740
1741  // Spilling a split live interval. It cannot be split any further. It is
1742  // also guaranteed to be a single val# / range interval.
1743  if (vrm.getPreSplitReg(li.reg)) {
1744    vrm.setIsSplitFromReg(li.reg, 0);
1745    // Unset the split kill marker on the last use.
1746    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1747    if (KillIdx != SlotIndex()) {
1748      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1749      assert(KillMI && "Last use disappeared?");
1750      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1751      assert(KillOp != -1 && "Last use disappeared?");
1752      KillMI->getOperand(KillOp).setIsKill(false);
1753    }
1754    vrm.removeKillPoint(li.reg);
1755    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1756    Slot = vrm.getStackSlot(li.reg);
1757    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1758    MachineInstr *ReMatDefMI = DefIsReMat ?
1759      vrm.getReMaterializedMI(li.reg) : NULL;
1760    int LdSlot = 0;
1761    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1762    bool isLoad = isLoadSS ||
1763      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1764    bool IsFirstRange = true;
1765    for (LiveInterval::Ranges::const_iterator
1766           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1767      // If this is a split live interval with multiple ranges, it means there
1768      // are two-address instructions that re-defined the value. Only the
1769      // first def can be rematerialized!
1770      if (IsFirstRange) {
1771        // Note ReMatOrigDefMI has already been deleted.
1772        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1773                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1774                             false, vrm, rc, ReMatIds, loopInfo,
1775                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1776                             MBBVRegsMap, NewLIs);
1777      } else {
1778        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1779                             Slot, 0, false, false, false,
1780                             false, vrm, rc, ReMatIds, loopInfo,
1781                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1782                             MBBVRegsMap, NewLIs);
1783      }
1784      IsFirstRange = false;
1785    }
1786
1787    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1788    normalizeSpillWeights(NewLIs);
1789    return NewLIs;
1790  }
1791
1792  bool TrySplit = !intervalIsInOneMBB(li);
1793  if (TrySplit)
1794    ++numSplits;
1795  bool NeedStackSlot = false;
1796  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1797       i != e; ++i) {
1798    const VNInfo *VNI = *i;
1799    unsigned VN = VNI->id;
1800    if (VNI->isUnused())
1801      continue; // Dead val#.
1802    // Is the def for the val# rematerializable?
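    // (A typical rematerializable def, illustrative: %reg1024 = MOV32ri 42,
    // which can be recomputed at each use instead of reloaded from memory.)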
1803    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1804      ? getInstructionFromIndex(VNI->def) : 0;
1805    bool dummy;
1806    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1807      // Remember how to remat the def of this val#.
1808      ReMatOrigDefs[VN] = ReMatDefMI;
1809      // Original def may be modified so we have to make a copy here.
1810      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1811      CloneMIs.push_back(Clone);
1812      ReMatDefs[VN] = Clone;
1813
1814      bool CanDelete = true;
1815      if (VNI->hasPHIKill()) {
1816        // A kill is a phi node, so not all of its uses can be rematerialized.
1817        // It must not be deleted.
1818        CanDelete = false;
1819        // Need a stack slot if there is any live range where uses cannot be
1820        // rematerialized.
1821        NeedStackSlot = true;
1822      }
1823      if (CanDelete)
1824        ReMatDelete.set(VN);
1825    } else {
1826      // Need a stack slot if there is any live range where uses cannot be
1827      // rematerialized.
1828      NeedStackSlot = true;
1829    }
1830  }
1831
1832  // One stack slot per live interval.
1833  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1834    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1835      Slot = vrm.assignVirt2StackSlot(li.reg);
1836
1837    // This case only occurs when the prealloc splitter has already assigned
1838    // a stack slot to this vreg.
1839    else
1840      Slot = vrm.getStackSlot(li.reg);
1841  }
1842
1843  // Create new intervals and rewrite defs and uses.
1844  for (LiveInterval::Ranges::const_iterator
1845         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1846    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1847    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1848    bool DefIsReMat = ReMatDefMI != NULL;
1849    bool CanDelete = ReMatDelete[I->valno->id];
1850    int LdSlot = 0;
1851    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1852    bool isLoad = isLoadSS ||
1853      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1854    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1855                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1856                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1857                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1858                               MBBVRegsMap, NewLIs);
1859  }
1860
1861  // If we are not splitting, we are done. Otherwise, insert spills / restores.
1862  if (!TrySplit) {
1863    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1864    normalizeSpillWeights(NewLIs);
1865    return NewLIs;
1866  }
1867
1868  SmallPtrSet<LiveInterval*, 4> AddedKill;
1869  SmallVector<unsigned, 2> Ops;
1870  if (NeedStackSlot) {
1871    int Id = SpillMBBs.find_first();
1872    while (Id != -1) {
1873      std::vector<SRInfo> &spills = SpillIdxes[Id];
1874      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1875        SlotIndex index = spills[i].index;
1876        unsigned VReg = spills[i].vreg;
1877        LiveInterval &nI = getOrCreateInterval(VReg);
1878        bool isReMat = vrm.isReMaterialized(VReg);
1879        MachineInstr *MI = getInstructionFromIndex(index);
1880        bool CanFold = false;
1881        bool FoundUse = false;
1882        Ops.clear();
1883        if (spills[i].canFold) {
1884          CanFold = true;
1885          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1886            MachineOperand &MO = MI->getOperand(j);
1887            if (!MO.isReg() || MO.getReg() != VReg)
1888              continue;
1889
1890            Ops.push_back(j);
1891            if (MO.isDef())
1892              continue;
1893            if (isReMat ||
1894                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1895                                                RestoreMBBs, RestoreIdxes))) {
1896              // MI has two-address uses of the same register. If the use
1897              // isn't the first and only use in the BB, then we can't fold
1898              // it. FIXME: Move this to rewriteInstructionsForSpills.
1899              CanFold = false;
1900              break;
1901            }
1902            FoundUse = true;
1903          }
1904        }
1905        // Fold the store into the def if possible.
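        // E.g. on X86 (illustrative), a def such as
        //   %reg1024<def> = MOV32rr %reg1025
        // can become a store directly into the stack slot:
        //   MOV32mr <fi#4>, ..., %reg1025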
1906        bool Folded = false;
1907        if (CanFold && !Ops.empty()) {
1908          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1909            Folded = true;
1910            if (FoundUse) {
1911              // Also folded uses, do not issue a load.
1912              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1913              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1914            }
1915            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1916          }
1917        }
1918
1919        // Otherwise tell the spiller to issue a spill.
1920        if (!Folded) {
1921          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1922          bool isKill = LR->end == index.getStoreIndex();
1923          if (!MI->registerDefIsDead(nI.reg))
1924            // No need to spill a dead def.
1925            vrm.addSpillPoint(VReg, isKill, MI);
1926          if (isKill)
1927            AddedKill.insert(&nI);
1928        }
1929      }
1930      Id = SpillMBBs.find_next(Id);
1931    }
1932  }
1933
1934  int Id = RestoreMBBs.find_first();
1935  while (Id != -1) {
1936    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1937    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1938      SlotIndex index = restores[i].index;
1939      if (index == SlotIndex())
1940        continue;
1941      unsigned VReg = restores[i].vreg;
1942      LiveInterval &nI = getOrCreateInterval(VReg);
1943      bool isReMat = vrm.isReMaterialized(VReg);
1944      MachineInstr *MI = getInstructionFromIndex(index);
1945      bool CanFold = false;
1946      Ops.clear();
1947      if (restores[i].canFold) {
1948        CanFold = true;
1949        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1950          MachineOperand &MO = MI->getOperand(j);
1951          if (!MO.isReg() || MO.getReg() != VReg)
1952            continue;
1953
1954          if (MO.isDef()) {
1955            // If this restore were to be folded, it would have been folded
1956            // already.
1957            CanFold = false;
1958            break;
1959          }
1960          Ops.push_back(j);
1961        }
1962      }
1963
1964      // Fold the load into the use if possible.
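      // E.g. on X86 (illustrative), instead of reloading into a register:
      //   %reg1024 = MOV32rm <fi#4>, ...
      //   CMP32rr %reg1024<kill>, %reg1025
      // the load can be folded into a single memory-operand instruction:
      //   CMP32mr <fi#4>, ..., %reg1025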
1965      bool Folded = false;
1966      if (CanFold && !Ops.empty()) {
1967        if (!isReMat)
1968          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1969        else {
1970          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1971          int LdSlot = 0;
1972          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1973          // If the rematerializable def is a load, also try to fold it.
1974          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1975            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1976                                          Ops, isLoadSS, LdSlot, VReg);
1977          if (!Folded) {
1978            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1979            if (ImpUse) {
1980              // Re-matting an instruction with virtual register use. Add the
1981              // register as an implicit use on the use MI and mark the register
1982              // interval as unspillable.
1983              LiveInterval &ImpLi = getInterval(ImpUse);
1984              ImpLi.markNotSpillable();
1985              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1986            }
1987          }
1988        }
1989      }
1990      // If folding is not possible / failed, then tell the spiller to issue a
1991      // load / rematerialization for us.
1992      if (Folded)
1993        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1994      else
1995        vrm.addRestorePoint(VReg, MI);
1996    }
1997    Id = RestoreMBBs.find_next(Id);
1998  }
1999
2000  // Finalize intervals: add kills, finalize spill weights, and filter out
2001  // dead intervals.
2002  std::vector<LiveInterval*> RetNewLIs;
2003  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2004    LiveInterval *LI = NewLIs[i];
2005    if (!LI->empty()) {
2006      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2007      if (!AddedKill.count(LI)) {
2008        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2009        SlotIndex LastUseIdx = LR->end.getBaseIndex();
2010        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2011        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2012        assert(UseIdx != -1);
2013        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2014          LastUse->getOperand(UseIdx).setIsKill();
2015          vrm.addKillPoint(LI->reg, LastUseIdx);
2016        }
2017      }
2018      RetNewLIs.push_back(LI);
2019    }
2020  }
2021
2022  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2023  normalizeSpillWeights(RetNewLIs);
2024  return RetNewLIs;
2025}
2026
2027/// hasAllocatableSuperReg - Return true if the specified physical register has
2028/// any super register that's allocatable.
2029bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2030  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2031    if (allocatableRegs_[*AS] && hasInterval(*AS))
2032      return true;
2033  return false;
2034}
2035
2036/// getRepresentativeReg - Find the largest super register of the specified
2037/// physical register.
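/// E.g. on X86-64 (illustrative), the representative of AX would typically
/// be RAX, depending on which super-registers the target marks allocatable.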
2038unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2039  // Find the largest super-register that is allocatable.
2040  unsigned BestReg = Reg;
2041  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2042    unsigned SuperReg = *AS;
2043    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2044      BestReg = SuperReg;
2045      break;
2046    }
2047  }
2048  return BestReg;
2049}
2050
2051/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2052/// specified interval that conflict with the specified physical register.
2053unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2054                                                   unsigned PhysReg) const {
2055  unsigned NumConflicts = 0;
2056  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2057  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2058         E = mri_->reg_end(); I != E; ++I) {
2059    MachineOperand &O = I.getOperand();
2060    MachineInstr *MI = O.getParent();
2061    if (MI->isDebugValue())
2062      continue;
2063    SlotIndex Index = getInstructionIndex(MI);
2064    if (pli.liveAt(Index))
2065      ++NumConflicts;
2066  }
2067  return NumConflicts;
2068}
2069
2070/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2071/// around all defs and uses of the specified interval. Return true if it
2072/// was able to cut the physical register's live interval.
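/// E.g. (illustrative): if the interval being allocated has a use at index
/// 100 where EAX is also live, EAX is spilled around that instruction (the
/// spiller inserts the emergency spill / reload) and the slice of EAX's
/// interval covering index 100 is removed.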
2073bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2074                                            unsigned PhysReg, VirtRegMap &vrm) {
2075  unsigned SpillReg = getRepresentativeReg(PhysReg);
2076
2077  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2078    // There may be registers which alias PhysReg but which are not
2079    // sub-registers of the chosen representative super register; assert,
2080    // since we can't handle that yet.
2081    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2082           tri_->isSuperRegister(*AS, SpillReg));
2083
2084  bool Cut = false;
2085  SmallVector<unsigned, 4> PRegs;
2086  if (hasInterval(SpillReg))
2087    PRegs.push_back(SpillReg);
2088  else {
2089    SmallSet<unsigned, 4> Added;
2090    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2091      if (Added.insert(*AS) && hasInterval(*AS)) {
2092        PRegs.push_back(*AS);
2093        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2094          Added.insert(*ASS);
2095      }
2096  }
2097
2098  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2099  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2100         E = mri_->reg_end(); I != E; ++I) {
2101    MachineOperand &O = I.getOperand();
2102    MachineInstr *MI = O.getParent();
2103    if (MI->isDebugValue() || SeenMIs.count(MI))
2104      continue;
2105    SeenMIs.insert(MI);
2106    SlotIndex Index = getInstructionIndex(MI);
2107    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2108      unsigned PReg = PRegs[i];
2109      LiveInterval &pli = getInterval(PReg);
2110      if (!pli.liveAt(Index))
2111        continue;
2112      vrm.addEmergencySpill(PReg, MI);
2113      SlotIndex StartIdx = Index.getLoadIndex();
2114      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2115      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2116        pli.removeRange(StartIdx, EndIdx);
2117        Cut = true;
2118      } else {
2119        std::string msg;
2120        raw_string_ostream Msg(msg);
2121        Msg << "Ran out of registers during register allocation!";
2122        if (MI->isInlineAsm()) {
2123          Msg << "\nPlease check your inline asm statement for invalid "
2124              << "constraints:\n";
2125          MI->print(Msg, tm_);
2126        }
2127        report_fatal_error(Msg.str());
2128      }
2129      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2130        if (!hasInterval(*AS))
2131          continue;
2132        LiveInterval &spli = getInterval(*AS);
2133        if (spli.liveAt(Index))
2134          spli.removeRange(Index.getLoadIndex(),
2135                           Index.getNextIndex().getBaseIndex());
2136      }
2137    }
2138  }
2139  return Cut;
2140}
2141
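/// addLiveRangeToEndOfBlock - Give reg a new value number defined at
/// startInst and extend its interval from that def to the end of the
/// instruction's basic block, e.g. (indices illustrative) [100d, 150) for a
/// def at index 100 in a block whose end index is 150.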
2142LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2143                                                  MachineInstr* startInst) {
2144  LiveInterval& Interval = getOrCreateInterval(reg);
2145  VNInfo* VN = Interval.getNextValue(
2146    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2147    startInst, true, getVNInfoAllocator());
2148  VN->setHasPHIKill(true);
2149  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2150  LiveRange LR(
2151     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2152     getMBBEndIdx(startInst->getParent()), VN);
2153  Interval.addRange(LR);
2154
2155  return LR;
2156}
2157
2158