LiveIntervalAnalysis.cpp revision aa111080dfb161054255c9c367779f1ea2581849
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

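/// computeNumbering - Assign an index to each instruction and basic block,
/// leaving empty slots at the start of every block and after every
/// instruction so code can be inserted later without a full renumbering.
/// If live intervals already exist, remap their indices to the new
/// numbering.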
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

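  // Indices are assigned in InstrSlots::NUM-sized groups: an instruction at
  // position i owns indices [i*NUM, i*NUM + NUM), one sub-slot per access
  // kind (LOAD, USE, DEF, STORE).  With four slots per instruction, for
  // example, index 40 is the LOAD slot of the instruction at position 10
  // and index 42 is its DEF slot.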
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second.begin(),
           LE = OI->second.end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second.vni_begin(),
           VNE = OI->second.vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VNs with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which work the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
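      // For example, given
      //   %reg1024 = ...                      (original def, value #0)
      //   %reg1024 = add %reg1024, %reg1025   (two-address redefinition)
      // the range from the original def up to the redef becomes a new value
      // #1, and value #0 is re-pointed at the two-address instruction: the
      // ranges after the redef already refer to value #0, so it must become
      // the value defined by the redefinition.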
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

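/// handlePhysicalRegisterDef - Update the live interval for the physical
/// register defined by MO.  Physical registers never live across basic
/// blocks here, so the interval always ends in the defining block.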
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or another point where we know it's dead, is if it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

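/// handleRegisterDef - Update the live interval for the register defined by
/// the given operand, dispatching to the virtual or physical register
/// handler, and also process the sub-registers of a physical register def.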
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

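/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB, running from the start of the block to its first kill or
/// redefinition, or through the end of the block if neither is found.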
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

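/// findLiveInMBBs - Collect the basic blocks whose start index falls inside
/// the given live range.  Returns true if any block was found.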
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


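/// createInterval - Construct a fresh LiveInterval for the given register.
/// Physical registers get an infinite spill weight, making them effectively
/// unspillable.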
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one virtual register
/// use operand (for now, we allow only one), then its rematerialized copies
/// implicitly use that register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

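  // If the instruction to be remat'ed also reads another virtual register,
  // the value of that register must be available at every point where this
  // interval's value would be rematerialized.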
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the defining MI of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from a
/// stack slot / to a register, or a rematerialized load, into the given
/// operands of the specified MI. If successful, MI is updated with the newly
/// created instruction and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      HasUse = false;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
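
/// anyKillInMBBAfterIdx - Return true if the given value number has a kill
/// in MBB that comes after the specified index.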
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
1380    unsigned NumUses = MIHasUse;
1381    while (i != e && RewriteMIs[i].MI == MI) {
1382      assert(RewriteMIs[i].Index == index);
1383      bool isUse = RewriteMIs[i].HasUse;
1384      if (isUse) ++NumUses;
1385      MIHasUse |= isUse;
1386      MIHasDef |= RewriteMIs[i].HasDef;
1387      ++i;
1388    }
1389    MachineBasicBlock *MBB = MI->getParent();
1390
1391    if (ImpUse && MI != ReMatDefMI) {
1392      // Re-matting an instruction with a virtual register use. Update the
1393      // register interval's spill weight to HUGE_VALF to prevent it from
1394      // being spilled.
1395      LiveInterval &ImpLi = getInterval(ImpUse);
1396      ImpLi.weight = HUGE_VALF;
1397    }
1398
1399    unsigned MBBId = MBB->getNumber();
1400    unsigned ThisVReg = 0;
1401    if (TrySplit) {
1402      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1403      if (NVI != MBBVRegsMap.end()) {
1404        ThisVReg = NVI->second;
1405        // One common case:
1406        // x = use
1407        // ...
1408        // ...
1409        // def = ...
1410        //     = use
1411        // It's better to start a new interval to avoid artificially
1412        // extending the new interval.
1413        if (MIHasDef && !MIHasUse) {
1414          MBBVRegsMap.erase(MBB->getNumber());
1415          ThisVReg = 0;
1416        }
1417      }
1418    }
1419
1420    bool IsNew = ThisVReg == 0;
1421    if (IsNew) {
1422      // This ends the previous live interval. If all of its defs / uses
1423      // can be folded, give it a low spill weight.
1424      if (NewVReg && TrySplit && AllCanFold) {
1425        LiveInterval &nI = getOrCreateInterval(NewVReg);
1426        nI.weight /= 10.0F;
1427      }
1428      AllCanFold = true;
1429    }
1430    NewVReg = ThisVReg;
1431
1432    bool HasDef = false;
1433    bool HasUse = false;
1434    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1435                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1436                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1437                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1438                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1439    if (!HasDef && !HasUse)
1440      continue;
1441
1442    AllCanFold &= CanFold;
1443
1444    // Update weight of spill interval.
1445    LiveInterval &nI = getOrCreateInterval(NewVReg);
1446    if (!TrySplit) {
1447      // The spill weight is now infinity as it cannot be spilled again.
1448      nI.weight = HUGE_VALF;
1449      continue;
1450    }
1451
1452    // Keep track of the last def and first use in each MBB.
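    // E.g. (hypothetical): if the split vreg is defined at indices 20 and 36
    // in the same MBB, only the entry for index 36 survives in SpillIdxes; a
    // single store after the last def covers the whole block.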
1453    if (HasDef) {
1454      if (MI != ReMatOrigDefMI || !CanDelete) {
1455        bool HasKill = false;
1456        if (!HasUse)
1457          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1458        else {
1459          // If this is two-address code, then this index starts a new VNInfo.
1460          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1461          if (VNI)
1462            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1463        }
1464        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1465          SpillIdxes.find(MBBId);
1466        if (!HasKill) {
1467          if (SII == SpillIdxes.end()) {
1468            std::vector<SRInfo> S;
1469            S.push_back(SRInfo(index, NewVReg, true));
1470            SpillIdxes.insert(std::make_pair(MBBId, S));
1471          } else if (SII->second.back().vreg != NewVReg) {
1472            SII->second.push_back(SRInfo(index, NewVReg, true));
1473          } else if ((int)index > SII->second.back().index) {
1474            // If there is an earlier def and this is a two-address
1475            // instruction, then it's not possible to fold the store (which
1476            // would also fold the load).
1477            SRInfo &Info = SII->second.back();
1478            Info.index = index;
1479            Info.canFold = !HasUse;
1480          }
1481          SpillMBBs.set(MBBId);
1482        } else if (SII != SpillIdxes.end() &&
1483                   SII->second.back().vreg == NewVReg &&
1484                   (int)index > SII->second.back().index) {
1485          // There is an earlier def that's not killed (must be two-address).
1486          // The spill is no longer needed.
1487          SII->second.pop_back();
1488          if (SII->second.empty()) {
1489            SpillIdxes.erase(MBBId);
1490            SpillMBBs.reset(MBBId);
1491          }
1492        }
1493      }
1494    }
1495
1496    if (HasUse) {
1497      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1498        SpillIdxes.find(MBBId);
1499      if (SII != SpillIdxes.end() &&
1500          SII->second.back().vreg == NewVReg &&
1501          (int)index > SII->second.back().index)
1502        // Use(s) follow the last def; it's not safe to fold the spill.
1503        SII->second.back().canFold = false;
1504      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1505        RestoreIdxes.find(MBBId);
1506      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1507        // If we are splitting live intervals, only fold if it's the first
1508        // use and there isn't another use later in the MBB.
1509        RII->second.back().canFold = false;
1510      else if (IsNew) {
1511        // Only need a reload if there isn't an earlier def / use.
1512        if (RII == RestoreIdxes.end()) {
1513          std::vector<SRInfo> Infos;
1514          Infos.push_back(SRInfo(index, NewVReg, true));
1515          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1516        } else {
1517          RII->second.push_back(SRInfo(index, NewVReg, true));
1518        }
1519        RestoreMBBs.set(MBBId);
1520      }
1521    }
1522
1523    // Update spill weight.
1524    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1525    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
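    // E.g., assuming the usual weighting where each def or use counts once,
    // scaled by 10^loopDepth, a def+use reference at loop depth 2 would add
    // on the order of 200 to nI's weight.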
1526  }
1527
1528  if (NewVReg && TrySplit && AllCanFold) {
1529    // If all of its defs / uses can be folded, give it a low spill weight.
1530    LiveInterval &nI = getOrCreateInterval(NewVReg);
1531    nI.weight /= 10.0F;
1532  }
1533}
1534
1535bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1536                        BitVector &RestoreMBBs,
1537                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1538  if (!RestoreMBBs[Id])
1539    return false;
1540  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1541  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1542    if (Restores[i].index == index &&
1543        Restores[i].vreg == vr &&
1544        Restores[i].canFold)
1545      return true;
1546  return false;
1547}
1548
1549void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1550                        BitVector &RestoreMBBs,
1551                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1552  if (!RestoreMBBs[Id])
1553    return;
1554  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1555  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1556    if (Restores[i].index == index && Restores[i].vreg == vr)
1557      Restores[i].index = -1;
1558}
1559
1560/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1561/// spilled and create empty intervals for their uses.
1562void
1563LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1564                                    const TargetRegisterClass* rc,
1565                                    std::vector<LiveInterval*> &NewLIs) {
1566  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1567         re = mri_->reg_end(); ri != re; ) {
1568    MachineOperand &O = ri.getOperand();
1569    MachineInstr *MI = &*ri;
1570    ++ri;
1571    if (O.isDef()) {
1572      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1573             "Register def was not rewritten?");
1574      RemoveMachineInstrFromMaps(MI);
1575      vrm.RemoveMachineInstrFromMaps(MI);
1576      MI->eraseFromParent();
1577    } else {
1578      // This must be a use of an implicit_def, so it's not part of the live
1579      // interval. Create a new empty live interval for it.
1580      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
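      // E.g. (hypothetical): if the spilled register was only ever defined by
      //   %reg1024<def> = IMPLICIT_DEF
      // then a user such as
      //   STORE %reg1024, <fi#0>
      // reads an undefined value anyway; rewriting it to a fresh vreg with an
      // empty interval lets the allocator hand it any register, reload-free.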
1581      unsigned NewVReg = mri_->createVirtualRegister(rc);
1582      vrm.grow();
1583      vrm.setIsImplicitlyDefined(NewVReg);
1584      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1585      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1586        MachineOperand &MO = MI->getOperand(i);
1587        if (MO.isReg() && MO.getReg() == li.reg)
1588          MO.setReg(NewVReg);
1589      }
1590    }
1591  }
1592}
1593
1594
1595std::vector<LiveInterval*> LiveIntervals::
1596addIntervalsForSpills(const LiveInterval &li,
1597                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1598                      float &SSWeight) {
1599  assert(li.weight != HUGE_VALF &&
1600         "attempt to spill already spilled interval!");
1601
1602  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1603  li.print(DOUT, tri_);
1604  DOUT << '\n';
1605
1606  // Spill slot weight.
1607  SSWeight = 0.0f;
1608
1609  // Each bit specifies whether a spill is required in the MBB.
1610  BitVector SpillMBBs(mf_->getNumBlockIDs());
1611  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1612  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1613  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1614  std::map<unsigned,unsigned> MBBVRegsMap;
1615  std::vector<LiveInterval*> NewLIs;
1616  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1617
1618  unsigned NumValNums = li.getNumValNums();
1619  SmallVector<MachineInstr*, 4> ReMatDefs;
1620  ReMatDefs.resize(NumValNums, NULL);
1621  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1622  ReMatOrigDefs.resize(NumValNums, NULL);
1623  SmallVector<int, 4> ReMatIds;
1624  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1625  BitVector ReMatDelete(NumValNums);
1626  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1627
1628  // Spilling a split live interval. It cannot be split any further. It is
1629  // also guaranteed to be a single val# / range interval.
1630  if (vrm.getPreSplitReg(li.reg)) {
1631    vrm.setIsSplitFromReg(li.reg, 0);
1632    // Unset the split kill marker on the last use.
1633    unsigned KillIdx = vrm.getKillPoint(li.reg);
1634    if (KillIdx) {
1635      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1636      assert(KillMI && "Last use disappeared?");
1637      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1638      assert(KillOp != -1 && "Last use disappeared?");
1639      KillMI->getOperand(KillOp).setIsKill(false);
1640    }
1641    vrm.removeKillPoint(li.reg);
1642    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1643    Slot = vrm.getStackSlot(li.reg);
1644    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1645    MachineInstr *ReMatDefMI = DefIsReMat ?
1646      vrm.getReMaterializedMI(li.reg) : NULL;
1647    int LdSlot = 0;
1648    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1649    bool isLoad = isLoadSS ||
1650      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1651    bool IsFirstRange = true;
1652    for (LiveInterval::Ranges::const_iterator
1653           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1654      // If this is a split live interval with multiple ranges, it means there
1655      // are two-address instructions that re-defined the value. Only the
1656      // first def can be rematerialized!
1657      if (IsFirstRange) {
1658        // Note ReMatOrigDefMI has already been deleted.
1659        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1660                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1661                             false, vrm, rc, ReMatIds, loopInfo,
1662                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1663                             MBBVRegsMap, NewLIs, SSWeight);
1664      } else {
1665        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1666                             Slot, 0, false, false, false,
1667                             false, vrm, rc, ReMatIds, loopInfo,
1668                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1669                             MBBVRegsMap, NewLIs, SSWeight);
1670      }
1671      IsFirstRange = false;
1672    }
1673
1674    SSWeight = 0.0f;  // Already accounted for when split.
1675    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1676    return NewLIs;
1677  }
1678
1679  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1680  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1681    TrySplit = false;
1682  if (TrySplit)
1683    ++numSplits;
1684  bool NeedStackSlot = false;
1685  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1686       i != e; ++i) {
1687    const VNInfo *VNI = *i;
1688    unsigned VN = VNI->id;
1689    unsigned DefIdx = VNI->def;
1690    if (DefIdx == ~1U)
1691      continue; // Dead val#.
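    // A def index of ~0U below means there is no known defining instruction
    // (e.g. the value is live in to the function), so ReMatDefMI stays null
    // and the val# cannot be rematerialized.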
1692    // Is the def for the val# rematerializable?
1693    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1694      ? 0 : getInstructionFromIndex(DefIdx);
1695    bool dummy;
1696    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1697      // Remember how to remat the def of this val#.
1698      ReMatOrigDefs[VN] = ReMatDefMI;
1699      // Original def may be modified so we have to make a copy here.
1700      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1701      ClonedMIs.push_back(Clone);
1702      ReMatDefs[VN] = Clone;
1703
1704      bool CanDelete = true;
1705      if (VNI->hasPHIKill) {
1706        // A kill is a phi node; not all of its uses can be rematerialized,
1707        // so the def must not be deleted.
1708        CanDelete = false;
1709        // Need a stack slot if there is any live range where uses cannot be
1710        // rematerialized.
1711        NeedStackSlot = true;
1712      }
1713      if (CanDelete)
1714        ReMatDelete.set(VN);
1715    } else {
1716      // Need a stack slot if there is any live range where uses cannot be
1717      // rematerialized.
1718      NeedStackSlot = true;
1719    }
1720  }
1721
1722  // One stack slot per live interval.
1723  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1724    Slot = vrm.assignVirt2StackSlot(li.reg);
1725
1726  // Create new intervals and rewrite defs and uses.
1727  for (LiveInterval::Ranges::const_iterator
1728         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1729    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1730    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1731    bool DefIsReMat = ReMatDefMI != NULL;
1732    bool CanDelete = ReMatDelete[I->valno->id];
1733    int LdSlot = 0;
1734    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1735    bool isLoad = isLoadSS ||
1736      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1737    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1738                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1739                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1740                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1741                               MBBVRegsMap, NewLIs, SSWeight);
1742  }
1743
1744  // Insert spills / restores if we are splitting.
1745  if (!TrySplit) {
1746    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1747    return NewLIs;
1748  }
1749
1750  SmallPtrSet<LiveInterval*, 4> AddedKill;
1751  SmallVector<unsigned, 2> Ops;
1752  if (NeedStackSlot) {
1753    int Id = SpillMBBs.find_first();
1754    while (Id != -1) {
1755      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1756      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1757      std::vector<SRInfo> &spills = SpillIdxes[Id];
1758      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1759        int index = spills[i].index;
1760        unsigned VReg = spills[i].vreg;
1761        LiveInterval &nI = getOrCreateInterval(VReg);
1762        bool isReMat = vrm.isReMaterialized(VReg);
1763        MachineInstr *MI = getInstructionFromIndex(index);
1764        bool CanFold = false;
1765        bool FoundUse = false;
1766        Ops.clear();
1767        if (spills[i].canFold) {
1768          CanFold = true;
1769          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1770            MachineOperand &MO = MI->getOperand(j);
1771            if (!MO.isRegister() || MO.getReg() != VReg)
1772              continue;
1773
1774            Ops.push_back(j);
1775            if (MO.isDef())
1776              continue;
1777            if (isReMat ||
1778                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1779                                                RestoreMBBs, RestoreIdxes))) {
1780              // MI has two-address uses of the same register. If the use
1781              // isn't the first and only use in the BB, then we can't fold
1782              // it. FIXME: Move this to rewriteInstructionsForSpills.
1783              CanFold = false;
1784              break;
1785            }
1786            FoundUse = true;
1787          }
1788        }
1789        // Fold the store into the def if possible.
1790        bool Folded = false;
1791        if (CanFold && !Ops.empty()) {
1792          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1793            Folded = true;
1794            if (FoundUse) {
1795              // Also folded uses, do not issue a load.
1796              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1797              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1798            }
1799            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1800          }
1801        }
1802
1803        // Otherwise tell the spiller to issue a spill.
1804        if (!Folded) {
1805          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1806          bool isKill = LR->end == getStoreIndex(index);
1807          if (!MI->registerDefIsDead(nI.reg))
1808            // No need to spill a dead def.
1809            vrm.addSpillPoint(VReg, isKill, MI);
1810          if (isKill)
1811            AddedKill.insert(&nI);
1812        }
1813
1814        // Update spill slot weight.
1815        if (!isReMat)
1816          SSWeight += getSpillWeight(true, false, loopDepth);
1817      }
1818      Id = SpillMBBs.find_next(Id);
1819    }
1820  }
1821
1822  int Id = RestoreMBBs.find_first();
1823  while (Id != -1) {
1824    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1825    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1826
1827    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1828    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1829      int index = restores[i].index;
1830      if (index == -1)
1831        continue;
1832      unsigned VReg = restores[i].vreg;
1833      LiveInterval &nI = getOrCreateInterval(VReg);
1834      bool isReMat = vrm.isReMaterialized(VReg);
1835      MachineInstr *MI = getInstructionFromIndex(index);
1836      bool CanFold = false;
1837      Ops.clear();
1838      if (restores[i].canFold) {
1839        CanFold = true;
1840        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1841          MachineOperand &MO = MI->getOperand(j);
1842          if (!MO.isRegister() || MO.getReg() != VReg)
1843            continue;
1844
1845          if (MO.isDef()) {
1846            // If this restore were to be folded, it would have been folded
1847            // already.
1848            CanFold = false;
1849            break;
1850          }
1851          Ops.push_back(j);
1852        }
1853      }
1854
1855      // Fold the load into the use if possible.
1856      bool Folded = false;
1857      if (CanFold && !Ops.empty()) {
1858        if (!isReMat)
1859          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1860        else {
1861          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1862          int LdSlot = 0;
1863          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1864          // If the rematerializable def is a load, also try to fold it.
1865          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1866            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1867                                          Ops, isLoadSS, LdSlot, VReg);
1868          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1869          if (ImpUse) {
1870            // Re-matting an instruction with a virtual register use. Add the
1871            // register as an implicit use on the use MI and update the register
1872            // interval's spill weight to HUGE_VALF to prevent it from being
1873            // spilled.
1874            LiveInterval &ImpLi = getInterval(ImpUse);
1875            ImpLi.weight = HUGE_VALF;
1876            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1877          }
1878        }
1879      }
1880      // If folding is not possible / failed, then tell the spiller to issue a
1881      // load / rematerialization for us.
1882      if (Folded)
1883        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1884      else
1885        vrm.addRestorePoint(VReg, MI);
1886
1887      // Update spill slot weight.
1888      if (!isReMat)
1889        SSWeight += getSpillWeight(false, true, loopDepth);
1890    }
1891    Id = RestoreMBBs.find_next(Id);
1892  }
1893
1894  // Finalize intervals: add kills, finalize spill weights, and filter out
1895  // dead intervals.
1896  std::vector<LiveInterval*> RetNewLIs;
1897  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1898    LiveInterval *LI = NewLIs[i];
1899    if (!LI->empty()) {
1900      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
1901      if (!AddedKill.count(LI)) {
1902        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1903        unsigned LastUseIdx = getBaseIndex(LR->end);
1904        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1905        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1906        assert(UseIdx != -1);
1907        if (LastUse->getOperand(UseIdx).isImplicit() ||
1908            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1909          LastUse->getOperand(UseIdx).setIsKill();
1910          vrm.addKillPoint(LI->reg, LastUseIdx);
1911        }
1912      }
1913      RetNewLIs.push_back(LI);
1914    }
1915  }
1916
1917  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1918  return RetNewLIs;
1919}
1920
1921/// hasAllocatableSuperReg - Return true if the specified physical register has
1922/// any super register that's allocatable.
1923bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1924  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1925    if (allocatableRegs_[*AS] && hasInterval(*AS))
1926      return true;
1927  return false;
1928}
1929
1930/// getRepresentativeReg - Find the largest super register of the specified
1931/// physical register.
1932unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1933  // Find the largest super-register that is allocatable.
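  // Example (hypothetical X86 walk, assuming EAX has an interval): for AL,
  // AX is rejected because EAX is an allocatable super-register with an
  // interval; EAX itself is then chosen since it has no super-register.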
1934  unsigned BestReg = Reg;
1935  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1936    unsigned SuperReg = *AS;
1937    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1938      BestReg = SuperReg;
1939      break;
1940    }
1941  }
1942  return BestReg;
1943}
1944
1945/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1946/// specified interval that conflict with the specified physical register.
1947unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1948                                                   unsigned PhysReg) const {
1949  unsigned NumConflicts = 0;
1950  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1951  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1952         E = mri_->reg_end(); I != E; ++I) {
1953    MachineOperand &O = I.getOperand();
1954    MachineInstr *MI = O.getParent();
1955    unsigned Index = getInstructionIndex(MI);
1956    if (pli.liveAt(Index))
1957      ++NumConflicts;
1958  }
1959  return NumConflicts;
1960}
1961
1962/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1963/// around all defs and uses of the specified interval.
1964void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1965                                            unsigned PhysReg, VirtRegMap &vrm) {
1966  unsigned SpillReg = getRepresentativeReg(PhysReg);
1967
1968  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1969    // If there is a register that aliases PhysReg but is not a sub-register
1970    // of the chosen representative super-register, assert, since we can't
1971    // handle that case yet.
1972    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1973           tri_->isSuperRegister(*AS, SpillReg));
1974
1975  LiveInterval &pli = getInterval(SpillReg);
1976  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1977  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1978         E = mri_->reg_end(); I != E; ++I) {
1979    MachineOperand &O = I.getOperand();
1980    MachineInstr *MI = O.getParent();
1981    if (SeenMIs.count(MI))
1982      continue;
1983    SeenMIs.insert(MI);
1984    unsigned Index = getInstructionIndex(MI);
1985    if (pli.liveAt(Index)) {
1986      vrm.addEmergencySpill(SpillReg, MI);
1987      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1988      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1989        if (!hasInterval(*AS))
1990          continue;
1991        LiveInterval &spli = getInterval(*AS);
1992        if (spli.liveAt(Index))
1993          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1994      }
1995    }
1996  }
1997}
1998
1999LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2000                                                   MachineInstr* startInst) {
2001  LiveInterval& Interval = getOrCreateInterval(reg);
2002  VNInfo* VN = Interval.getNextValue(
2003            getInstructionIndex(startInst) + InstrSlots::DEF,
2004            startInst, getVNInfoAllocator());
2005  VN->hasPHIKill = true;
2006  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2007  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2008               getMBBEndIdx(startInst->getParent()) + 1, VN);
2009  Interval.addRange(LR);
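  // E.g., a startInst at index 40 (def slot 42, assuming the usual
  // InstrSlots::DEF offset of 2) in a block whose end index is 52 yields
  // the range [42, 53), with the value number's kill recorded at 52.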
2010
2011  return LR;
2012}
2013