LiveIntervalAnalysis.cpp revision 712c9f069048e18c1b3782046487f5e1f74b2483
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
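
// A note on instruction numbering (a sketch; the authoritative definition is
// the InstrSlots enum in LiveIntervalAnalysis.h): each instruction is assumed
// to own InstrSlots::NUM consecutive index slots, e.g.
//
//   enum InstrSlots { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };
//
// so an instruction at base index B reads at B+USE, writes at B+DEF, and the
// surrounding LOAD/STORE slots leave room for reloads and spills. The
// division and remainder arithmetic on InstrSlots::NUM throughout this file
// relies on that layout.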

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

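  // With the slot layout assumed above, a block containing two instructions
  // is numbered like this (a sketch):
  //
  //   [ 0, 3]  empty slot at the start of the block
  //   [ 4, 7]  first instruction
  //   [ 8,11]  empty slot (room for later spill / reload code)
  //   [12,15]  second instruction
  //   [16,19]  trailing empty slot; the block's end index is MIIndex - 1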
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset  = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
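        // (Judging from how they are tested here and in isReMaterializable
        // below, ~0U marks a value with no defining instruction and ~1U
        // marks a dead value.)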
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the live range of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
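      // For example (a sketch):
      //   %reg1024 = MOV %reg1025        ; originally defines value #0
      //   %reg1024 = ADD %reg1024, 1     ; two-address redefinition
      // The single range starting at the copy must become two values: a new
      // value #1 takes over the copy's definition point, while value #0 is
      // re-pointed at the redefinition below.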
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
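  // E.g. (a sketch, using the slot layout assumed above): a def at base
  // index 4 that is killed at base index 12 gets the interval [6, 14),
  // since defSlot(4) = 6 and useSlot(12) + 1 = 14.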
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case where we should reach here with a dead physreg, without a
  // killing instruction or an instruction that marks it dead, is when it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills.  If the register reaches a def before it's killed, it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual and physical
/// registers.  For some ordering of the machine instructions [1,N], a live
/// interval is an interval [i, j), where 1 <= i <= j < N, for which a
/// variable is live.
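///
/// E.g. (a sketch, using the slot layout assumed above): a vreg defined at
/// base index 8 and last used at base index 20 gets the interval [10, 22),
/// since defSlot(8) = 10 and useSlot(20) + 1 = 22.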
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}

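/// findLiveInMBBs - Collect the basic blocks whose start index falls within
/// [LR.start, LR.end), i.e. the blocks this live range is live into.
/// Returns true if at least one such block was found.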
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
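    // Stop scanning after the first candidate in release builds; in +Asserts
    // builds keep scanning so the assert above can flag a second register
    // operand.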
#ifdef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember this is not safe to fold into a
    // two-address instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
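    // E.g. a load from a constant pool entry (a PseudoSourceValue that
    // reports isConstant) is acceptable, while a load through an arbitrary
    // pointer is rejected below unless alias analysis proves the memory
    // constant.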
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the remat
          // mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Returns
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the i-th operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload or spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be re-materialized operands of MI) with a new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try to fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. E.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      HasUse = false;

    // Update the stack slot spill weight unless we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;
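    // (getSpillWeight presumably scales with loop depth, roughly
    //  (isDef + isUse) * 10^loopDepth, so uses in deep loops dominate the
    //  slot weight.)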

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
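  // (Round the range outward to whole instructions: getBaseIndex drops the
  //  sub-slot, so [start, end) covers every slot of every instruction the
  //  live range touches.)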
1349
1350  // First collect all the def / use in this live range that will be rewritten.
1351  // Make sure they are sorted according to instruction index.
1352  std::vector<RewriteInfo> RewriteMIs;
1353  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1354         re = mri_->reg_end(); ri != re; ) {
1355    MachineInstr *MI = &*ri;
1356    MachineOperand &O = ri.getOperand();
1357    ++ri;
1358    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1359    unsigned index = getInstructionIndex(MI);
1360    if (index < start || index >= end)
1361      continue;
1362    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1363      // Must be defined by an implicit def. It should not be spilled. Note,
1364      // this is for correctness reason. e.g.
1365      // 8   %reg1024<def> = IMPLICIT_DEF
1366      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1367      // The live range [12, 14) are not part of the r1024 live interval since
1368      // it's defined by an implicit def. It will not conflicts with live
1369      // interval of r1025. Now suppose both registers are spilled, you can
1370      // easily see a situation where both registers are reloaded before
1371      // the INSERT_SUBREG and both target registers that would overlap.
1372      continue;
1373    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1374  }
1375  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1376
1377  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1378  // Now rewrite the defs and uses.
1379  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1380    RewriteInfo &rwi = RewriteMIs[i];
1381    ++i;
1382    unsigned index = rwi.Index;
1383    bool MIHasUse = rwi.HasUse;
1384    bool MIHasDef = rwi.HasDef;
1385    MachineInstr *MI = rwi.MI;
1386    // If MI defs and/or uses the same register multiple times, then there
1387    // are multiple entries.
1388    unsigned NumUses = MIHasUse;
1389    while (i != e && RewriteMIs[i].MI == MI) {
1390      assert(RewriteMIs[i].Index == index);
1391      bool isUse = RewriteMIs[i].HasUse;
1392      if (isUse) ++NumUses;
1393      MIHasUse |= isUse;
1394      MIHasDef |= RewriteMIs[i].HasDef;
1395      ++i;
1396    }
1397    MachineBasicBlock *MBB = MI->getParent();
1398
1399    if (ImpUse && MI != ReMatDefMI) {
1400      // Re-matting an instruction with virtual register use. Update the
1401      // register interval's spill weight to HUGE_VALF to prevent it from
1402      // being spilled.
1403      LiveInterval &ImpLi = getInterval(ImpUse);
1404      ImpLi.weight = HUGE_VALF;
1405    }
1406
1407    unsigned MBBId = MBB->getNumber();
1408    unsigned ThisVReg = 0;
1409    if (TrySplit) {
1410      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1411      if (NVI != MBBVRegsMap.end()) {
1412        ThisVReg = NVI->second;
1413        // One common case:
1414        // x = use
1415        // ...
1416        // ...
1417        // def = ...
1418        //     = use
1419        // It's better to start a new interval so we avoid artificially
1420        // extending the previous one.
1421        if (MIHasDef && !MIHasUse) {
1422          MBBVRegsMap.erase(MBBId);
1423          ThisVReg = 0;
1424        }
1425      }
1426    }
1427
1428    bool IsNew = ThisVReg == 0;
1429    if (IsNew) {
1430      // This ends the previous live interval. If all of its def / use
1431      // can be folded, give it a low spill weight.
1432      if (NewVReg && TrySplit && AllCanFold) {
1433        LiveInterval &nI = getOrCreateInterval(NewVReg);
1434        nI.weight /= 10.0F;
1435      }
1436      AllCanFold = true;
1437    }
1438    NewVReg = ThisVReg;
1439
1440    bool HasDef = false;
1441    bool HasUse = false;
1442    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1443                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1444                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1445                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1446                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1447    if (!HasDef && !HasUse)
1448      continue;
1449
1450    AllCanFold &= CanFold;
1451
1452    // Update weight of spill interval.
1453    LiveInterval &nI = getOrCreateInterval(NewVReg);
1454    if (!TrySplit) {
1455      // The spill weight is now infinity as it cannot be spilled again.
1456      nI.weight = HUGE_VALF;
1457      continue;
1458    }
1459
1460    // Keep track of the last def and first use in each MBB.
1461    if (HasDef) {
1462      if (MI != ReMatOrigDefMI || !CanDelete) {
1463        bool HasKill = false;
1464        if (!HasUse)
1465          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1466        else {
1467          // If this is a two-address instruction, then this index starts a new VNInfo.
1468          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1469          if (VNI)
1470            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1471        }
1472        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1473          SpillIdxes.find(MBBId);
1474        if (!HasKill) {
1475          if (SII == SpillIdxes.end()) {
1476            std::vector<SRInfo> S;
1477            S.push_back(SRInfo(index, NewVReg, true));
1478            SpillIdxes.insert(std::make_pair(MBBId, S));
1479          } else if (SII->second.back().vreg != NewVReg) {
1480            SII->second.push_back(SRInfo(index, NewVReg, true));
1481          } else if ((int)index > SII->second.back().index) {
1482            // If there is an earlier def and this is a two-address
1483            // instruction, then it's not possible to fold the store (which
1484            // would also fold the load).
1485            SRInfo &Info = SII->second.back();
1486            Info.index = index;
1487            Info.canFold = !HasUse;
1488          }
1489          SpillMBBs.set(MBBId);
1490        } else if (SII != SpillIdxes.end() &&
1491                   SII->second.back().vreg == NewVReg &&
1492                   (int)index > SII->second.back().index) {
1493          // There is an earlier def that's not killed (must be two-address).
1494          // The spill is no longer needed.
1495          SII->second.pop_back();
1496          if (SII->second.empty()) {
1497            SpillIdxes.erase(MBBId);
1498            SpillMBBs.reset(MBBId);
1499          }
1500        }
1501      }
1502    }
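    // Bookkeeping sketch (hypothetical indices): given two defs of NewVReg in
    // the same MBB at indices 8 and 16 (a two-address redefinition), the
    // pending SRInfo recorded for index 8 is updated in place to index 16 if
    // the value is not killed afterwards, or popped entirely if it is, so at
    // most one pending spill per vreg survives in each MBB.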
1503
1504    if (HasUse) {
1505      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1506        SpillIdxes.find(MBBId);
1507      if (SII != SpillIdxes.end() &&
1508          SII->second.back().vreg == NewVReg &&
1509          (int)index > SII->second.back().index)
1510        // Use(s) follow the last def; it's not safe to fold the spill.
1511        SII->second.back().canFold = false;
1512      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1513        RestoreIdxes.find(MBBId);
1514      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1515        // If we are splitting live intervals, only fold if it's the first
1516        // use and there isn't another use later in the MBB.
1517        RII->second.back().canFold = false;
1518      else if (IsNew) {
1519        // Only need a reload if there isn't an earlier def / use.
1520        if (RII == RestoreIdxes.end()) {
1521          std::vector<SRInfo> Infos;
1522          Infos.push_back(SRInfo(index, NewVReg, true));
1523          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1524        } else {
1525          RII->second.push_back(SRInfo(index, NewVReg, true));
1526        }
1527        RestoreMBBs.set(MBBId);
1528      }
1529    }
1530
1531    // Update spill weight.
1532    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1533    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1534  }
1535
1536  if (NewVReg && TrySplit && AllCanFold) {
1537    // If all of its def / use can be folded, give it a low spill weight.
1538    LiveInterval &nI = getOrCreateInterval(NewVReg);
1539    nI.weight /= 10.0F;
1540  }
1541}
1542
1543bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1544                        BitVector &RestoreMBBs,
1545                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1546  if (!RestoreMBBs[Id])
1547    return false;
1548  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1549  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1550    if (Restores[i].index == index &&
1551        Restores[i].vreg == vr &&
1552        Restores[i].canFold)
1553      return true;
1554  return false;
1555}
1556
1557void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1558                        BitVector &RestoreMBBs,
1559                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1560  if (!RestoreMBBs[Id])
1561    return;
1562  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1563  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1564    if (Restores[i].index == index && Restores[i].vreg == vr)
1565      Restores[i].index = -1;
1566}
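
// Invalidated entries keep their slot in the vector but get index -1; the
// restore emission loop in addIntervalsForSpills skips such entries rather
// than erasing them, presumably to keep the bookkeeping simple while spill
// folding is still in progress.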
1567
1568/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1569/// spilled and create empty intervals for their uses.
1570void
1571LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1572                                    const TargetRegisterClass* rc,
1573                                    std::vector<LiveInterval*> &NewLIs) {
1574  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1575         re = mri_->reg_end(); ri != re; ) {
1576    MachineOperand &O = ri.getOperand();
1577    MachineInstr *MI = &*ri;
1578    ++ri;
1579    if (O.isDef()) {
1580      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1581             "Register def was not rewritten?");
1582      RemoveMachineInstrFromMaps(MI);
1583      vrm.RemoveMachineInstrFromMaps(MI);
1584      MI->eraseFromParent();
1585    } else {
1586      // This must be a use of an implicit_def so it's not part of the live
1587      // interval. Create a new empty live interval for it.
1588      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1589      unsigned NewVReg = mri_->createVirtualRegister(rc);
1590      vrm.grow();
1591      vrm.setIsImplicitlyDefined(NewVReg);
1592      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1593      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1594        MachineOperand &MO = MI->getOperand(i);
1595        if (MO.isReg() && MO.getReg() == li.reg)
1596          MO.setReg(NewVReg);
1597      }
1598    }
1599  }
1600}
1601
1602namespace {
1603  struct LISorter {
1604    bool operator()(LiveInterval* A, LiveInterval* B) {
1605      return A->beginNumber() < B->beginNumber();
1606    }
1607  };
1608}
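
// LISorter orders live intervals by ascending start index;
// addIntervalsForSpillsFast uses it below so that the newly created intervals
// are returned in program order.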
1609
1610std::vector<LiveInterval*> LiveIntervals::
1611addIntervalsForSpillsFast(const LiveInterval &li,
1612                          const MachineLoopInfo *loopInfo,
1613                          VirtRegMap &vrm, float& SSWeight) {
1614  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1615
1616  std::vector<LiveInterval*> added;
1617
1618  assert(li.weight != HUGE_VALF &&
1619         "attempt to spill already spilled interval!");
1620
1621  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1622  DEBUG(li.dump());
1623  DOUT << '\n';
1624
1625  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1626
1627  DenseMap<MachineInstr*, unsigned> VRegMap;
1628  DenseMap<MachineInstr*, VNInfo*> VNMap;
1629
1630  SSWeight = 0.0f;
1631
1632  for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg),
1633       RE = mri_->reg_end(); RI != RE; ) {
1634    // Create a new virtual register for the spill interval.
1635    MachineOperand& MO = RI.getOperand();
1636    unsigned NewVReg = 0;
1637    bool newInt = false;
1638    if (!VRegMap.count(MO.getParent())) {
1639      VRegMap[MO.getParent()] = NewVReg = mri_->createVirtualRegister(rc);
1640      vrm.grow();
1641      vrm.assignVirt2StackSlot(NewVReg, slot);
1642
1643      newInt = true;
1644    } else
1645      NewVReg = VRegMap[MO.getParent()];
1646
1647    // Increment iterator to avoid invalidation.
1648    ++RI;
1649
1650    MO.setReg(NewVReg);
1651
1652    // create a new live interval for this spill
1653    LiveInterval &nI = getOrCreateInterval(NewVReg);
1654
1655    // the spill weight is now infinity as it
1656    // cannot be spilled again
1657    nI.weight = HUGE_VALF;
1658
1659    unsigned index = getInstructionIndex(MO.getParent());
1660    bool HasUse = MO.isUse();
1661    bool HasDef = MO.isDef();
1662    if (!VNMap.count(MO.getParent()))
1663      VNMap[MO.getParent()] = nI.getNextValue(~0U, 0, getVNInfoAllocator());
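    // A def index of ~0U marks this val# as having no known defining
    // instruction; the same val# is then shared by the use and def live
    // ranges created for this instruction below.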
1664    if (HasUse) {
1665      LiveRange LR(getLoadIndex(index), getUseIndex(index),
1666                   VNMap[MO.getParent()]);
1667      DOUT << " +" << LR;
1668      nI.addRange(LR);
1669      vrm.addRestorePoint(NewVReg, MO.getParent());
1670      MO.setIsKill(true);
1671    }
1672    if (HasDef) {
1673      LiveRange LR(getDefIndex(index), getStoreIndex(index),
1674                   VNMap[MO.getParent()]);
1675      DOUT << " +" << LR;
1676      nI.addRange(LR);
1677      vrm.addSpillPoint(NewVReg, true, MO.getParent());
1678    }
1679
1680    if (newInt)
1681      added.push_back(&nI);
1682
1683    DOUT << "\t\t\t\tadded new interval: ";
1684    DEBUG(nI.dump());
1685    DOUT << '\n';
1686
1687    unsigned loopDepth = loopInfo->getLoopDepth(MO.getParent()->getParent());
1688    if (HasUse) {
1689      if (HasDef)
1690        SSWeight += getSpillWeight(true, true, loopDepth);
1691      else
1692        SSWeight += getSpillWeight(false, true, loopDepth);
1693    } else
1694      SSWeight += getSpillWeight(true, false, loopDepth);
1695
1696  }
1697
1698  std::sort(added.begin(), added.end(), LISorter());
1699
1700  return added;
1701}
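
// A minimal usage sketch, assuming a caller that runs after this analysis
// (the variable names here are illustrative, not part of the API):
//
//   float SSWeight;
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(li, loopInfo, vrm, SSWeight);
//
// addIntervalsForSpills below simply forwards to the fast path above when
// EnableFastSpilling is set.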
1702
1703std::vector<LiveInterval*> LiveIntervals::
1704addIntervalsForSpills(const LiveInterval &li,
1705                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1706                      float &SSWeight) {
1707
1708  if (EnableFastSpilling)
1709    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1710
1711  assert(li.weight != HUGE_VALF &&
1712         "attempt to spill already spilled interval!");
1713
1714  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1715  li.print(DOUT, tri_);
1716  DOUT << '\n';
1717
1718  // Spill slot weight.
1719  SSWeight = 0.0f;
1720
1721  // Each bit specifies whether a spill is required in the MBB.
1722  BitVector SpillMBBs(mf_->getNumBlockIDs());
1723  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1724  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1725  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1726  DenseMap<unsigned,unsigned> MBBVRegsMap;
1727  std::vector<LiveInterval*> NewLIs;
1728  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1729
1730  unsigned NumValNums = li.getNumValNums();
1731  SmallVector<MachineInstr*, 4> ReMatDefs;
1732  ReMatDefs.resize(NumValNums, NULL);
1733  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1734  ReMatOrigDefs.resize(NumValNums, NULL);
1735  SmallVector<int, 4> ReMatIds;
1736  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1737  BitVector ReMatDelete(NumValNums);
1738  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1739
1740  // Spilling a split live interval. It cannot be split any further. Also,
1741  // it's guaranteed to be a single val# / range interval.
1742  if (vrm.getPreSplitReg(li.reg)) {
1743    vrm.setIsSplitFromReg(li.reg, 0);
1744    // Unset the split kill marker on the last use.
1745    unsigned KillIdx = vrm.getKillPoint(li.reg);
1746    if (KillIdx) {
1747      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1748      assert(KillMI && "Last use disappeared?");
1749      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1750      assert(KillOp != -1 && "Last use disappeared?");
1751      KillMI->getOperand(KillOp).setIsKill(false);
1752    }
1753    vrm.removeKillPoint(li.reg);
1754    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1755    Slot = vrm.getStackSlot(li.reg);
1756    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1757    MachineInstr *ReMatDefMI = DefIsReMat ?
1758      vrm.getReMaterializedMI(li.reg) : NULL;
1759    int LdSlot = 0;
1760    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1761    bool isLoad = isLoadSS ||
1762      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1763    bool IsFirstRange = true;
1764    for (LiveInterval::Ranges::const_iterator
1765           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1766      // If this is a split live interval with multiple ranges, it means there
1767      // are two-address instructions that re-defined the value. Only the
1768      // first def can be rematerialized!
1769      if (IsFirstRange) {
1770        // Note ReMatOrigDefMI has already been deleted.
1771        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1772                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1773                             false, vrm, rc, ReMatIds, loopInfo,
1774                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1775                             MBBVRegsMap, NewLIs, SSWeight);
1776      } else {
1777        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1778                             Slot, 0, false, false, false,
1779                             false, vrm, rc, ReMatIds, loopInfo,
1780                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1781                             MBBVRegsMap, NewLIs, SSWeight);
1782      }
1783      IsFirstRange = false;
1784    }
1785
1786    SSWeight = 0.0f;  // Already accounted for when split.
1787    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1788    return NewLIs;
1789  }
1790
1791  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1792  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1793    TrySplit = false;
1794  if (TrySplit)
1795    ++numSplits;
1796  bool NeedStackSlot = false;
1797  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1798       i != e; ++i) {
1799    const VNInfo *VNI = *i;
1800    unsigned VN = VNI->id;
1801    unsigned DefIdx = VNI->def;
1802    if (DefIdx == ~1U)
1803      continue; // Dead val#.
1804    // Is the def for the val# rematerializable?
1805    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1806      ? 0 : getInstructionFromIndex(DefIdx);
1807    bool dummy;
1808    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1809      // Remember how to remat the def of this val#.
1810      ReMatOrigDefs[VN] = ReMatDefMI;
1811      // Original def may be modified so we have to make a copy here.
1812      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1813      ClonedMIs.push_back(Clone);
1814      ReMatDefs[VN] = Clone;
1815
1816      bool CanDelete = true;
1817      if (VNI->hasPHIKill) {
1818        // A kill is a phi node; not all of its uses can be rematerialized.
1819        // It must not be deleted.
1820        CanDelete = false;
1821        // Need a stack slot if there is any live range where uses cannot be
1822        // rematerialized.
1823        NeedStackSlot = true;
1824      }
1825      if (CanDelete)
1826        ReMatDelete.set(VN);
1827    } else {
1828      // Need a stack slot if there is any live range where uses cannot be
1829      // rematerialized.
1830      NeedStackSlot = true;
1831    }
1832  }
1833
1834  // One stack slot per live interval.
1835  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1836    Slot = vrm.assignVirt2StackSlot(li.reg);
1837
1838  // Create new intervals and rewrite defs and uses.
1839  for (LiveInterval::Ranges::const_iterator
1840         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1841    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1842    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1843    bool DefIsReMat = ReMatDefMI != NULL;
1844    bool CanDelete = ReMatDelete[I->valno->id];
1845    int LdSlot = 0;
1846    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1847    bool isLoad = isLoadSS ||
1848      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1849    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1850                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1851                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1852                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1853                               MBBVRegsMap, NewLIs, SSWeight);
1854  }
1855
1856  // Insert spills / restores if we are splitting; otherwise we are done.
1857  if (!TrySplit) {
1858    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1859    return NewLIs;
1860  }
1861
1862  SmallPtrSet<LiveInterval*, 4> AddedKill;
1863  SmallVector<unsigned, 2> Ops;
1864  if (NeedStackSlot) {
1865    int Id = SpillMBBs.find_first();
1866    while (Id != -1) {
1867      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1868      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1869      std::vector<SRInfo> &spills = SpillIdxes[Id];
1870      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1871        int index = spills[i].index;
1872        unsigned VReg = spills[i].vreg;
1873        LiveInterval &nI = getOrCreateInterval(VReg);
1874        bool isReMat = vrm.isReMaterialized(VReg);
1875        MachineInstr *MI = getInstructionFromIndex(index);
1876        bool CanFold = false;
1877        bool FoundUse = false;
1878        Ops.clear();
1879        if (spills[i].canFold) {
1880          CanFold = true;
1881          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1882            MachineOperand &MO = MI->getOperand(j);
1883            if (!MO.isRegister() || MO.getReg() != VReg)
1884              continue;
1885
1886            Ops.push_back(j);
1887            if (MO.isDef())
1888              continue;
1889            if (isReMat ||
1890                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1891                                                RestoreMBBs, RestoreIdxes))) {
1892              // MI has two-address uses of the same register. If the use
1893              // isn't the first and only use in the BB, then we can't fold
1894              // it. FIXME: Move this to rewriteInstructionsForSpills.
1895              CanFold = false;
1896              break;
1897            }
1898            FoundUse = true;
1899          }
1900        }
1901        // Fold the store into the def if possible.
1902        bool Folded = false;
1903        if (CanFold && !Ops.empty()) {
1904          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1905            Folded = true;
1906            if (FoundUse) {
1907              // Also folded uses, do not issue a load.
1908              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1909              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1910            }
1911            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1912          }
1913        }
1914
1915        // Otherwise tell the spiller to issue a spill.
1916        if (!Folded) {
1917          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1918          bool isKill = LR->end == getStoreIndex(index);
1919          if (!MI->registerDefIsDead(nI.reg))
1920            // No need to spill a dead def.
1921            vrm.addSpillPoint(VReg, isKill, MI);
1922          if (isKill)
1923            AddedKill.insert(&nI);
1924        }
1925
1926        // Update spill slot weight.
1927        if (!isReMat)
1928          SSWeight += getSpillWeight(true, false, loopDepth);
1929      }
1930      Id = SpillMBBs.find_next(Id);
1931    }
1932  }
1933
1934  int Id = RestoreMBBs.find_first();
1935  while (Id != -1) {
1936    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1937    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1938
1939    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1940    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1941      int index = restores[i].index;
1942      if (index == -1)
1943        continue;
1944      unsigned VReg = restores[i].vreg;
1945      LiveInterval &nI = getOrCreateInterval(VReg);
1946      bool isReMat = vrm.isReMaterialized(VReg);
1947      MachineInstr *MI = getInstructionFromIndex(index);
1948      bool CanFold = false;
1949      Ops.clear();
1950      if (restores[i].canFold) {
1951        CanFold = true;
1952        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1953          MachineOperand &MO = MI->getOperand(j);
1954          if (!MO.isRegister() || MO.getReg() != VReg)
1955            continue;
1956
1957          if (MO.isDef()) {
1958            // If this restore were to be folded, it would have been folded
1959            // already.
1960            CanFold = false;
1961            break;
1962          }
1963          Ops.push_back(j);
1964        }
1965      }
1966
1967      // Fold the load into the use if possible.
1968      bool Folded = false;
1969      if (CanFold && !Ops.empty()) {
1970        if (!isReMat)
1971          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1972        else {
1973          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1974          int LdSlot = 0;
1975          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1976          // If the rematerializable def is a load, also try to fold it.
1977          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1978            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1979                                          Ops, isLoadSS, LdSlot, VReg);
1980          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1981          if (ImpUse) {
1982            // Re-matting an instruction with virtual register use. Add the
1983            // register as an implicit use on the use MI and update the register
1984            // interval's spill weight to HUGE_VALF to prevent it from being
1985            // spilled.
1986            LiveInterval &ImpLi = getInterval(ImpUse);
1987            ImpLi.weight = HUGE_VALF;
1988            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1989          }
1990        }
1991      }
1992      // If folding is not possible / failed, then tell the spiller to issue a
1993      // load / rematerialization for us.
1994      if (Folded)
1995        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1996      else
1997        vrm.addRestorePoint(VReg, MI);
1998
1999      // Update spill slot weight.
2000      if (!isReMat)
2001        SSWeight += getSpillWeight(false, true, loopDepth);
2002    }
2003    Id = RestoreMBBs.find_next(Id);
2004  }
2005
2006  // Finalize intervals: add kills, finalize spill weights, and filter out
2007  // dead intervals.
2008  std::vector<LiveInterval*> RetNewLIs;
2009  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2010    LiveInterval *LI = NewLIs[i];
2011    if (!LI->empty()) {
2012      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2013      if (!AddedKill.count(LI)) {
2014        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2015        unsigned LastUseIdx = getBaseIndex(LR->end);
2016        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2017        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2018        assert(UseIdx != -1);
2019        if (LastUse->getOperand(UseIdx).isImplicit() ||
2020            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2021          LastUse->getOperand(UseIdx).setIsKill();
2022          vrm.addKillPoint(LI->reg, LastUseIdx);
2023        }
2024      }
2025      RetNewLIs.push_back(LI);
2026    }
2027  }
2028
2029  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2030  return RetNewLIs;
2031}
2032
2033/// hasAllocatableSuperReg - Return true if the specified physical register has
2034/// any super register that's allocatable.
2035bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2036  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2037    if (allocatableRegs_[*AS] && hasInterval(*AS))
2038      return true;
2039  return false;
2040}
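
// Hypothetical x86 example: querying AX walks its super-registers (EAX, plus
// RAX on 64-bit targets); the function returns true only if one of them is
// both allocatable and currently has a live interval, in which case AX would
// not be chosen as its own representative below.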
2041
2042/// getRepresentativeReg - Find the largest super register of the specified
2043/// physical register.
2044unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2045  // Find the largest super-register that is allocatable.
2046  unsigned BestReg = Reg;
2047  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2048    unsigned SuperReg = *AS;
2049    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2050      BestReg = SuperReg;
2051      break;
2052    }
2053  }
2054  return BestReg;
2055}
2056
2057/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2058/// specified interval that conflict with the specified physical register.
2059unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2060                                                   unsigned PhysReg) const {
2061  unsigned NumConflicts = 0;
2062  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2063  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2064         E = mri_->reg_end(); I != E; ++I) {
2065    MachineOperand &O = I.getOperand();
2066    MachineInstr *MI = O.getParent();
2067    unsigned Index = getInstructionIndex(MI);
2068    if (pli.liveAt(Index))
2069      ++NumConflicts;
2070  }
2071  return NumConflicts;
2072}
2073
2074/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2075/// around all defs and uses of the specified interval.
2076void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2077                                            unsigned PhysReg, VirtRegMap &vrm) {
2078  unsigned SpillReg = getRepresentativeReg(PhysReg);
2079
2080  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2081    // There may be registers which alias PhysReg but which are not a
2082    // sub-register of the chosen representative super register. Assert,
2083    // since we can't handle that case yet.
2084    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2085           tri_->isSuperRegister(*AS, SpillReg));
2086
2087  LiveInterval &pli = getInterval(SpillReg);
2088  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2089  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2090         E = mri_->reg_end(); I != E; ++I) {
2091    MachineOperand &O = I.getOperand();
2092    MachineInstr *MI = O.getParent();
2093    if (SeenMIs.count(MI))
2094      continue;
2095    SeenMIs.insert(MI);
2096    unsigned Index = getInstructionIndex(MI);
2097    if (pli.liveAt(Index)) {
2098      vrm.addEmergencySpill(SpillReg, MI);
2099      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2100      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2101        if (!hasInterval(*AS))
2102          continue;
2103        LiveInterval &spli = getInterval(*AS);
2104        if (spli.liveAt(Index))
2105          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2106      }
2107    }
2108  }
2109}
2110
2111LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2112                                                   MachineInstr* startInst) {
2113  LiveInterval& Interval = getOrCreateInterval(reg);
2114  VNInfo* VN = Interval.getNextValue(
2115            getInstructionIndex(startInst) + InstrSlots::DEF,
2116            startInst, getVNInfoAllocator());
2117  VN->hasPHIKill = true;
2118  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2119  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2120               getMBBEndIdx(startInst->getParent()) + 1, VN);
2121  Interval.addRange(LR);
2122
2123  return LR;
2124}
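
// For example (indices hypothetical), if startInst sits at base index 100 and
// its block ends at index 120, the range added above runs from the DEF slot
// of startInst to just past the block end, i.e. [102, 121), with a kill
// recorded at 120 and hasPHIKill set because the value is assumed to escape
// through a successor phi.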
2125