// LiveIntervalAnalysis.cpp -- snapshot at revision 745825f431920662e97bdab5c1bcfac62e48c52f
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
// Hidden options for help debugging.
// When set, isReMaterializable() always answers false, disabling
// rematerialization entirely.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Allow live intervals to be split at basic block boundaries (on by default).
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
// Cap on the number of interval splits; negative presumably means
// "no limit" -- confirm at the use site (not visible in this chunk).
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);
47
// Statistics reported under -stats.
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identity and registration; makes the pass available as
// "-liveintervals" on the opt/llc command line.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
55
/// getAnalysisUsage - Declare the analyses this pass requires and preserves.
/// LiveVariables is consumed to build the intervals; PHI elimination and
/// two-address lowering must have run first so the machine code is in the
/// form this analysis expects.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
66
67void LiveIntervals::releaseMemory() {
68  Idx2MBBMap.clear();
69  mi2iMap_.clear();
70  i2miMap_.clear();
71  r2iMap_.clear();
72  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
73  VNInfoAllocator.Reset();
74  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
75    delete ClonedMIs[i];
76}
77
78void LiveIntervals::computeNumbering() {
79  Index2MiMap OldI2MI = i2miMap_;
80
81  Idx2MBBMap.clear();
82  MBB2IdxMap.clear();
83  mi2iMap_.clear();
84  i2miMap_.clear();
85
86  // Number MachineInstrs and MachineBasicBlocks.
87  // Initialize MBB indexes to a sentinal.
88  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
89
90  unsigned MIIndex = 0;
91  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
92       MBB != E; ++MBB) {
93    unsigned StartIdx = MIIndex;
94
95    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
96         I != E; ++I) {
97      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
98      assert(inserted && "multiple MachineInstr -> index mappings");
99      i2miMap_.push_back(I);
100      MIIndex += InstrSlots::NUM;
101    }
102
103    // Set the MBB2IdxMap entry for this MBB.
104    MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
105      ? std::make_pair(StartIdx, StartIdx)  // Empty MBB
106      : std::make_pair(StartIdx, MIIndex - 1);
107    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
108  }
109  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
110
111  if (!OldI2MI.empty())
112    for (iterator I = begin(), E = end(); I != E; ++I)
113      for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
114           LI != LE; ++LI) {
115        LI->start = mi2iMap_[OldI2MI[LI->start]];
116        LI->end = mi2iMap_[OldI2MI[LI->end]];
117
118        VNInfo* vni = LI->valno;
119        vni->def = mi2iMap_[OldI2MI[vni->def]];
120
121        for (size_t i = 0; i < vni->kills.size(); ++i)
122          vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i]]];
123      }
124}
125
126/// runOnMachineFunction - Register allocate the whole function
127///
128bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
129  mf_ = &fn;
130  mri_ = &mf_->getRegInfo();
131  tm_ = &fn.getTarget();
132  tri_ = tm_->getRegisterInfo();
133  tii_ = tm_->getInstrInfo();
134  lv_ = &getAnalysis<LiveVariables>();
135  allocatableRegs_ = tri_->getAllocatableSet(fn);
136
137  computeNumbering();
138  computeIntervals();
139
140  numIntervals += getNumIntervals();
141
142  DOUT << "********** INTERVALS **********\n";
143  for (iterator I = begin(), E = end(); I != E; ++I) {
144    I->second.print(DOUT, tri_);
145    DOUT << "\n";
146  }
147
148  numIntervalsAfter += getNumIntervals();
149  DEBUG(dump());
150  return true;
151}
152
153/// print - Implement the dump method.
154void LiveIntervals::print(std::ostream &O, const Module* ) const {
155  O << "********** INTERVALS **********\n";
156  for (const_iterator I = begin(), E = end(); I != E; ++I) {
157    I->second.print(DOUT, tri_);
158    DOUT << "\n";
159  }
160
161  O << "********** MACHINEINSTRS **********\n";
162  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
163       mbbi != mbbe; ++mbbi) {
164    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
165    for (MachineBasicBlock::iterator mii = mbbi->begin(),
166           mie = mbbi->end(); mii != mie; ++mii) {
167      O << getInstructionIndex(mii) << '\t' << *mii;
168    }
169  }
170}
171
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Visit every instruction slot covered by each range of li.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      // A copy to or from li's own register is not a conflict.
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Translate virtual register operands to their assigned physical
        // register, if any; unassigned vregs can't conflict with reg.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Overlap includes aliasing sub-/super-registers.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
211
212void LiveIntervals::printRegName(unsigned reg) const {
213  if (TargetRegisterInfo::isPhysicalRegister(reg))
214    cerr << tri_->getName(reg);
215  else
216    cerr << "%reg" << reg;
217}
218
/// handleVirtualRegisterDef - Extend interval.reg's live interval for the
/// def of the virtual register at instruction mi (numbered MIIdx), using
/// LiveVariables kill information to find where each value dies.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // IMPLICIT_DEF defines no real value; nothing to record.
  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Remember copy-like defs on the value number; the coalescer uses
    // this (see getVNInfoSourceReg).
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      // As above, remember copy-like defs on the value number.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
414
/// handlePhysicalRegisterDef - Add a live range for the physical register
/// defined by mi (numbered MIIdx).  CopyMI, if non-null, is the copy-like
/// instruction performing the def and is recorded on the value number.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  // Scan forward through the block for the kill or a redefinition.
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
475
476void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
477                                      MachineBasicBlock::iterator MI,
478                                      unsigned MIIdx,
479                                      unsigned reg) {
480  if (TargetRegisterInfo::isVirtualRegister(reg))
481    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
482  else if (allocatableRegs_[reg]) {
483    MachineInstr *CopyMI = NULL;
484    unsigned SrcReg, DstReg;
485    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
486        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
487        tii_->isMoveInstr(*MI, SrcReg, DstReg))
488      CopyMI = MI;
489    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
490    // Def of a register also defines its sub-registers.
491    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
492      // If MI also modifies the sub-register explicitly, avoid processing it
493      // more than once. Do not pass in TRI here so it checks for exact match.
494      if (!MI->modifiesRegister(*AS))
495        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
496  }
497}
498
/// handleLiveInRegister - Add a live range for a register that is live into
/// MBB (starting at index MIIdx, the top of the block).  isAlias is true
/// when the register is only live-in because it aliases a real live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // An alias live-in that is never read is treated as dead at the top
      // of the block.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // No kill and no redef: the value is live through the entire block.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
546
547/// computeIntervals - computes the live intervals for virtual
548/// registers. for some ordering of the machine instructions [1,N] a
549/// live interval is an interval [i, j) where 1 <= i <= j < N for
550/// which a variable is live
551void LiveIntervals::computeIntervals() {
552  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
553       << "********** Function: "
554       << ((Value*)mf_->getFunction())->getName() << '\n';
555  // Track the index of the current machine instr.
556  unsigned MIIndex = 0;
557  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
558       MBBI != E; ++MBBI) {
559    MachineBasicBlock *MBB = MBBI;
560    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
561
562    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
563
564    // Create intervals for live-ins to this BB first.
565    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
566           LE = MBB->livein_end(); LI != LE; ++LI) {
567      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
568      // Multiple live-ins can alias the same register.
569      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
570        if (!hasInterval(*AS))
571          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
572                               true);
573    }
574
575    for (; MI != miEnd; ++MI) {
576      DOUT << MIIndex << "\t" << *MI;
577
578      // Handle defs.
579      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
580        MachineOperand &MO = MI->getOperand(i);
581        // handle register defs - build intervals
582        if (MO.isRegister() && MO.getReg() && MO.isDef())
583          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
584      }
585
586      MIIndex += InstrSlots::NUM;
587    }
588  }
589}
590
591bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
592                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
593  std::vector<IdxMBBPair>::const_iterator I =
594    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
595
596  bool ResVal = false;
597  while (I != Idx2MBBMap.end()) {
598    if (LR.end <= I->first)
599      break;
600    MBBs.push_back(I->second);
601    ResVal = true;
602    ++I;
603  }
604  return ResVal;
605}
606
607
608LiveInterval LiveIntervals::createInterval(unsigned reg) {
609  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
610                       HUGE_VALF : 0.0F;
611  return LiveInterval(reg, Weight);
612}
613
614/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
615/// copy field and returns the source register that defines it.
616unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
617  if (!VNI->copy)
618    return 0;
619
620  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
621    return VNI->copy->getOperand(1).getReg();
622  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
623    return VNI->copy->getOperand(2).getReg();
624  unsigned SrcReg, DstReg;
625  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
626    return SrcReg;
627  assert(0 && "Unrecognized copy instruction!");
628  return 0;
629}
630
631//===----------------------------------------------------------------------===//
632// Register allocator hooks.
633//
634
635/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
636/// allow one) virtual register operand, then its uses are implicitly using
637/// the register. Returns the virtual register.
638unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
639                                            MachineInstr *MI) const {
640  unsigned RegOp = 0;
641  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
642    MachineOperand &MO = MI->getOperand(i);
643    if (!MO.isRegister() || !MO.isUse())
644      continue;
645    unsigned Reg = MO.getReg();
646    if (Reg == 0 || Reg == li.reg)
647      continue;
648    // FIXME: For now, only remat MI with at most one register operand.
649    assert(!RegOp &&
650           "Can't rematerialize instruction with multiple register operand!");
651    RegOp = MO.getReg();
652    break;
653  }
654  return RegOp;
655}
656
657/// isValNoAvailableAt - Return true if the val# of the specified interval
658/// which reaches the given instruction also reaches the specified use index.
659bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
660                                       unsigned UseIdx) const {
661  unsigned Index = getInstructionIndex(MI);
662  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
663  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
664  return UI != li.end() && UI->valno == ValNo;
665}
666
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.  isLoad is set when
/// the rematerialized instruction is a load.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Hidden debugging switch: force all remat queries to fail.
  if (DisableReMat)
    return false;

  isLoad = false;
  // IMPLICIT_DEF costs nothing to recreate.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    // If the remat def implicitly uses another virtual register, that
    // register's value must reach every use of li that this val# covers.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only consider uses reached by this val#.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
710
711/// isReMaterializable - Returns true if every definition of MI of every
712/// val# of the specified interval is re-materializable.
713bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
714  isLoad = false;
715  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
716       i != e; ++i) {
717    const VNInfo *VNI = *i;
718    unsigned DefIdx = VNI->def;
719    if (DefIdx == ~1U)
720      continue; // Dead val#.
721    // Is the def for the val# rematerializable?
722    if (DefIdx == ~0u)
723      return false;
724    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
725    bool DefIsLoad = false;
726    if (!ReMatDefMI ||
727        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
728      return false;
729    isLoad |= DefIsLoad;
730  }
731  return true;
732}
733
734/// FilterFoldedOps - Filter out two-address use operands. Return
735/// true if it finds any issue with the operands that ought to prevent
736/// folding.
737static bool FilterFoldedOps(MachineInstr *MI,
738                            SmallVector<unsigned, 2> &Ops,
739                            unsigned &MRInfo,
740                            SmallVector<unsigned, 2> &FoldOps) {
741  const TargetInstrDesc &TID = MI->getDesc();
742
743  MRInfo = 0;
744  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
745    unsigned OpIdx = Ops[i];
746    MachineOperand &MO = MI->getOperand(OpIdx);
747    // FIXME: fold subreg use.
748    if (MO.getSubReg())
749      return true;
750    if (MO.isDef())
751      MRInfo |= (unsigned)VirtRegMap::isMod;
752    else {
753      // Filter out two-address use operand(s).
754      if (!MO.isImplicit() &&
755          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
756        MRInfo = VirtRegMap::isModRef;
757        continue;
758      }
759      MRInfo |= (unsigned)VirtRegMap::isRef;
760    }
761    FoldOps.push_back(OpIdx);
762  }
763  return false;
764}
765

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot (spill/reload) or the
  // rematerialized def instruction into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer any pending spill/restore points recorded on the old
    // instruction, then swap fmi in for MI in the index maps and the block.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
824
825/// canFoldMemoryOperand - Returns true if the specified load / store
826/// folding is possible.
827bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
828                                         SmallVector<unsigned, 2> &Ops,
829                                         bool ReMat) const {
830  // Filter the list of operand indexes that are to be folded. Abort if
831  // any operand will prevent folding.
832  unsigned MRInfo = 0;
833  SmallVector<unsigned, 2> FoldOps;
834  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
835    return false;
836
837  // It's only legal to remat for a use, not a def.
838  if (ReMat && (MRInfo & VirtRegMap::isMod))
839    return false;
840
841  return tii_->canFoldMemoryOperand(MI, FoldOps);
842}
843
844bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
845  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
846  for (LiveInterval::Ranges::const_iterator
847         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
848    std::vector<IdxMBBPair>::const_iterator II =
849      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
850    if (II == Idx2MBBMap.end())
851      continue;
852    if (I->end > II->first)  // crossing a MBB.
853      return false;
854    MBBs.insert(II->second);
855    if (MBBs.size() > 1)
856      return false;
857  }
858  return true;
859}
860
861/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
862/// interval on to-be re-materialized operands of MI) with new register.
863void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
864                                       MachineInstr *MI, unsigned NewVReg,
865                                       VirtRegMap &vrm) {
866  // There is an implicit use. That means one of the other operand is
867  // being remat'ed and the remat'ed instruction has li.reg as an
868  // use operand. Make sure we rewrite that as well.
869  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
870    MachineOperand &MO = MI->getOperand(i);
871    if (!MO.isRegister())
872      continue;
873    unsigned Reg = MO.getReg();
874    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
875      continue;
876    if (!vrm.isReMaterialized(Reg))
877      continue;
878    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
879    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
880    if (UseMO)
881      UseMO->setReg(NewVReg);
882  }
883}
884
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// rewriteInstructionForSpills rewrites every operand of MI that refers to
/// li.reg to use NewVReg (creating NewVReg if it is 0 on entry), attempting
/// to fold the spill / restore / remat'ed load into MI first when legal.
/// HasDef / HasUse are set to reflect whether MI writes / reads the
/// register after rewriting. Returns true if folding was possible for this
/// instruction when splitting (CanFold).
/// NOTE: the 'end' parameter is passed through from the caller but is not
/// referenced in this routine.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    // Remember the original register for the operand-grouping scan below.
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    // Collect every later operand of MI that refers to the same register.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // When splitting, only record whether folding would be possible.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover from the load slot to just past the use.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // Defs get a range from the def slot to the store slot.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1076bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1077                                   const VNInfo *VNI,
1078                                   MachineBasicBlock *MBB, unsigned Idx) const {
1079  unsigned End = getMBBEndIdx(MBB);
1080  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1081    unsigned KillIdx = VNI->kills[j];
1082    if (KillIdx > Idx && KillIdx < End)
1083      return true;
1084  }
1085  return false;
1086}
1087
1088static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1089  const VNInfo *VNI = NULL;
1090  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1091         e = li.vni_end(); i != e; ++i)
1092    if ((*i)->def == DefIdx) {
1093      VNI = *i;
1094      break;
1095    }
1096  return VNI;
1097}
1098
1099/// RewriteInfo - Keep track of machine instrs that will be rewritten
1100/// during spilling.
1101namespace {
1102  struct RewriteInfo {
1103    unsigned Index;
1104    MachineInstr *MI;
1105    bool HasUse;
1106    bool HasDef;
1107    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1108      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1109  };
1110
1111  struct RewriteInfoCompare {
1112    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1113      return LHS.Index < RHS.Index;
1114    }
1115  };
1116}
1117
/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg that
/// fall inside the live range I, assigning each rewritten instruction a
/// (possibly shared, per-MBB) new virtual register via
/// rewriteInstructionForSpills. When TrySplit is set, this also records
/// per-MBB spill points (SpillMBBs / SpillIdxes) and restore points
/// (RestoreMBBs / RestoreIdxes) for later placement, and lowers the spill
/// weight of intervals whose defs / uses can all be folded.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down / up to whole instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before any rewriting can invalidate the current entry.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Coalesce them into one def/use summary.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    // When splitting, reuse the vreg already assigned to this MBB if any.
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    // Rewrite this instruction; NewVReg is created on demand (it stays 0
    // in until the callee allocates one).
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def in the MBB: a spill is needed here.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1320
1321bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1322                        BitVector &RestoreMBBs,
1323                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1324  if (!RestoreMBBs[Id])
1325    return false;
1326  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1327  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1328    if (Restores[i].index == index &&
1329        Restores[i].vreg == vr &&
1330        Restores[i].canFold)
1331      return true;
1332  return false;
1333}
1334
1335void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1336                        BitVector &RestoreMBBs,
1337                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1338  if (!RestoreMBBs[Id])
1339    return;
1340  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1341  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1342    if (Restores[i].index == index && Restores[i].vreg)
1343      Restores[i].index = -1;
1344}
1345
1346/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1347/// spilled and create empty intervals for their uses.
1348void
1349LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1350                                    const TargetRegisterClass* rc,
1351                                    std::vector<LiveInterval*> &NewLIs) {
1352  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1353         re = mri_->reg_end(); ri != re; ) {
1354    MachineOperand &O = ri.getOperand();
1355    MachineInstr *MI = &*ri;
1356    ++ri;
1357    if (O.isDef()) {
1358      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1359             "Register def was not rewritten?");
1360      RemoveMachineInstrFromMaps(MI);
1361      vrm.RemoveMachineInstrFromMaps(MI);
1362      MI->eraseFromParent();
1363    } else {
1364      // This must be an use of an implicit_def so it's not part of the live
1365      // interval. Create a new empty live interval for it.
1366      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1367      unsigned NewVReg = mri_->createVirtualRegister(rc);
1368      vrm.grow();
1369      vrm.setIsImplicitlyDefined(NewVReg);
1370      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1371      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1372        MachineOperand &MO = MI->getOperand(i);
1373        if (MO.isReg() && MO.getReg() == li.reg)
1374          MO.setReg(NewVReg);
1375      }
1376    }
1377  }
1378}
1379
1380
1381std::vector<LiveInterval*> LiveIntervals::
1382addIntervalsForSpills(const LiveInterval &li,
1383                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1384  // Since this is called after the analysis is done we don't know if
1385  // LiveVariables is available
1386  lv_ = getAnalysisToUpdate<LiveVariables>();
1387
1388  assert(li.weight != HUGE_VALF &&
1389         "attempt to spill already spilled interval!");
1390
1391  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1392  li.print(DOUT, tri_);
1393  DOUT << '\n';
1394
1395  // Each bit specify whether it a spill is required in the MBB.
1396  BitVector SpillMBBs(mf_->getNumBlockIDs());
1397  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1398  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1399  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1400  std::map<unsigned,unsigned> MBBVRegsMap;
1401  std::vector<LiveInterval*> NewLIs;
1402  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1403
1404  unsigned NumValNums = li.getNumValNums();
1405  SmallVector<MachineInstr*, 4> ReMatDefs;
1406  ReMatDefs.resize(NumValNums, NULL);
1407  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1408  ReMatOrigDefs.resize(NumValNums, NULL);
1409  SmallVector<int, 4> ReMatIds;
1410  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1411  BitVector ReMatDelete(NumValNums);
1412  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1413
1414  // Spilling a split live interval. It cannot be split any further. Also,
1415  // it's also guaranteed to be a single val# / range interval.
1416  if (vrm.getPreSplitReg(li.reg)) {
1417    vrm.setIsSplitFromReg(li.reg, 0);
1418    // Unset the split kill marker on the last use.
1419    unsigned KillIdx = vrm.getKillPoint(li.reg);
1420    if (KillIdx) {
1421      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1422      assert(KillMI && "Last use disappeared?");
1423      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1424      assert(KillOp != -1 && "Last use disappeared?");
1425      KillMI->getOperand(KillOp).setIsKill(false);
1426    }
1427    vrm.removeKillPoint(li.reg);
1428    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1429    Slot = vrm.getStackSlot(li.reg);
1430    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1431    MachineInstr *ReMatDefMI = DefIsReMat ?
1432      vrm.getReMaterializedMI(li.reg) : NULL;
1433    int LdSlot = 0;
1434    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1435    bool isLoad = isLoadSS ||
1436      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1437    bool IsFirstRange = true;
1438    for (LiveInterval::Ranges::const_iterator
1439           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1440      // If this is a split live interval with multiple ranges, it means there
1441      // are two-address instructions that re-defined the value. Only the
1442      // first def can be rematerialized!
1443      if (IsFirstRange) {
1444        // Note ReMatOrigDefMI has already been deleted.
1445        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1446                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1447                             false, vrm, rc, ReMatIds, loopInfo,
1448                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1449                             MBBVRegsMap, NewLIs);
1450      } else {
1451        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1452                             Slot, 0, false, false, false,
1453                             false, vrm, rc, ReMatIds, loopInfo,
1454                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1455                             MBBVRegsMap, NewLIs);
1456      }
1457      IsFirstRange = false;
1458    }
1459
1460    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1461    return NewLIs;
1462  }
1463
1464  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1465  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1466    TrySplit = false;
1467  if (TrySplit)
1468    ++numSplits;
1469  bool NeedStackSlot = false;
1470  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1471       i != e; ++i) {
1472    const VNInfo *VNI = *i;
1473    unsigned VN = VNI->id;
1474    unsigned DefIdx = VNI->def;
1475    if (DefIdx == ~1U)
1476      continue; // Dead val#.
1477    // Is the def for the val# rematerializable?
1478    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1479      ? 0 : getInstructionFromIndex(DefIdx);
1480    bool dummy;
1481    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1482      // Remember how to remat the def of this val#.
1483      ReMatOrigDefs[VN] = ReMatDefMI;
1484      // Original def may be modified so we have to make a copy here. vrm must
1485      // delete these!
1486      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1487
1488      bool CanDelete = true;
1489      if (VNI->hasPHIKill) {
1490        // A kill is a phi node, not all of its uses can be rematerialized.
1491        // It must not be deleted.
1492        CanDelete = false;
1493        // Need a stack slot if there is any live range where uses cannot be
1494        // rematerialized.
1495        NeedStackSlot = true;
1496      }
1497      if (CanDelete)
1498        ReMatDelete.set(VN);
1499    } else {
1500      // Need a stack slot if there is any live range where uses cannot be
1501      // rematerialized.
1502      NeedStackSlot = true;
1503    }
1504  }
1505
1506  // One stack slot per live interval.
1507  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1508    Slot = vrm.assignVirt2StackSlot(li.reg);
1509
1510  // Create new intervals and rewrite defs and uses.
1511  for (LiveInterval::Ranges::const_iterator
1512         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1513    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1514    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1515    bool DefIsReMat = ReMatDefMI != NULL;
1516    bool CanDelete = ReMatDelete[I->valno->id];
1517    int LdSlot = 0;
1518    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1519    bool isLoad = isLoadSS ||
1520      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1521    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1522                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1523                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1524                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1525                               MBBVRegsMap, NewLIs);
1526  }
1527
1528  // Insert spills / restores if we are splitting.
1529  if (!TrySplit) {
1530    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1531    return NewLIs;
1532  }
1533
1534  SmallPtrSet<LiveInterval*, 4> AddedKill;
1535  SmallVector<unsigned, 2> Ops;
1536  if (NeedStackSlot) {
1537    int Id = SpillMBBs.find_first();
1538    while (Id != -1) {
1539      std::vector<SRInfo> &spills = SpillIdxes[Id];
1540      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1541        int index = spills[i].index;
1542        unsigned VReg = spills[i].vreg;
1543        LiveInterval &nI = getOrCreateInterval(VReg);
1544        bool isReMat = vrm.isReMaterialized(VReg);
1545        MachineInstr *MI = getInstructionFromIndex(index);
1546        bool CanFold = false;
1547        bool FoundUse = false;
1548        Ops.clear();
1549        if (spills[i].canFold) {
1550          CanFold = true;
1551          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1552            MachineOperand &MO = MI->getOperand(j);
1553            if (!MO.isRegister() || MO.getReg() != VReg)
1554              continue;
1555
1556            Ops.push_back(j);
1557            if (MO.isDef())
1558              continue;
1559            if (isReMat ||
1560                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1561                                                RestoreMBBs, RestoreIdxes))) {
1562              // MI has two-address uses of the same register. If the use
1563              // isn't the first and only use in the BB, then we can't fold
1564              // it. FIXME: Move this to rewriteInstructionsForSpills.
1565              CanFold = false;
1566              break;
1567            }
1568            FoundUse = true;
1569          }
1570        }
1571        // Fold the store into the def if possible.
1572        bool Folded = false;
1573        if (CanFold && !Ops.empty()) {
1574          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1575            Folded = true;
1576            if (FoundUse > 0) {
1577              // Also folded uses, do not issue a load.
1578              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1579              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1580            }
1581            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1582          }
1583        }
1584
1585        // Otherwise tell the spiller to issue a spill.
1586        if (!Folded) {
1587          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1588          bool isKill = LR->end == getStoreIndex(index);
1589          if (!MI->registerDefIsDead(nI.reg))
1590            // No need to spill a dead def.
1591            vrm.addSpillPoint(VReg, isKill, MI);
1592          if (isKill)
1593            AddedKill.insert(&nI);
1594        }
1595      }
1596      Id = SpillMBBs.find_next(Id);
1597    }
1598  }
1599
1600  int Id = RestoreMBBs.find_first();
1601  while (Id != -1) {
1602    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1603    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1604      int index = restores[i].index;
1605      if (index == -1)
1606        continue;
1607      unsigned VReg = restores[i].vreg;
1608      LiveInterval &nI = getOrCreateInterval(VReg);
1609      MachineInstr *MI = getInstructionFromIndex(index);
1610      bool CanFold = false;
1611      Ops.clear();
1612      if (restores[i].canFold) {
1613        CanFold = true;
1614        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1615          MachineOperand &MO = MI->getOperand(j);
1616          if (!MO.isRegister() || MO.getReg() != VReg)
1617            continue;
1618
1619          if (MO.isDef()) {
1620            // If this restore were to be folded, it would have been folded
1621            // already.
1622            CanFold = false;
1623            break;
1624          }
1625          Ops.push_back(j);
1626        }
1627      }
1628
1629      // Fold the load into the use if possible.
1630      bool Folded = false;
1631      if (CanFold && !Ops.empty()) {
1632        if (!vrm.isReMaterialized(VReg))
1633          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1634        else {
1635          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1636          int LdSlot = 0;
1637          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1638          // If the rematerializable def is a load, also try to fold it.
1639          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1640            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1641                                          Ops, isLoadSS, LdSlot, VReg);
1642          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1643          if (ImpUse) {
1644            // Re-matting an instruction with virtual register use. Add the
1645            // register as an implicit use on the use MI and update the register
1646            // interval's spill weight to HUGE_VALF to prevent it from being
1647            // spilled.
1648            LiveInterval &ImpLi = getInterval(ImpUse);
1649            ImpLi.weight = HUGE_VALF;
1650            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1651          }
1652        }
1653      }
1654      // If folding is not possible / failed, then tell the spiller to issue a
1655      // load / rematerialization for us.
1656      if (Folded)
1657        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1658      else
1659        vrm.addRestorePoint(VReg, MI);
1660    }
1661    Id = RestoreMBBs.find_next(Id);
1662  }
1663
1664  // Finalize intervals: add kills, finalize spill weights, and filter out
1665  // dead intervals.
1666  std::vector<LiveInterval*> RetNewLIs;
1667  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1668    LiveInterval *LI = NewLIs[i];
1669    if (!LI->empty()) {
1670      LI->weight /= LI->getSize();
1671      if (!AddedKill.count(LI)) {
1672        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1673        unsigned LastUseIdx = getBaseIndex(LR->end);
1674        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1675        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1676        assert(UseIdx != -1);
1677        if (LastUse->getOperand(UseIdx).isImplicit() ||
1678            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1679          LastUse->getOperand(UseIdx).setIsKill();
1680          vrm.addKillPoint(LI->reg, LastUseIdx);
1681        }
1682      }
1683      RetNewLIs.push_back(LI);
1684    }
1685  }
1686
1687  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1688  return RetNewLIs;
1689}
1690
1691/// hasAllocatableSuperReg - Return true if the specified physical register has
1692/// any super register that's allocatable.
1693bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1694  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1695    if (allocatableRegs_[*AS] && hasInterval(*AS))
1696      return true;
1697  return false;
1698}
1699
1700/// getRepresentativeReg - Find the largest super register of the specified
1701/// physical register.
1702unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1703  // Find the largest super-register that is allocatable.
1704  unsigned BestReg = Reg;
1705  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1706    unsigned SuperReg = *AS;
1707    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1708      BestReg = SuperReg;
1709      break;
1710    }
1711  }
1712  return BestReg;
1713}
1714
1715/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1716/// specified interval that conflicts with the specified physical register.
1717unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1718                                                   unsigned PhysReg) const {
1719  unsigned NumConflicts = 0;
1720  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1721  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1722         E = mri_->reg_end(); I != E; ++I) {
1723    MachineOperand &O = I.getOperand();
1724    MachineInstr *MI = O.getParent();
1725    unsigned Index = getInstructionIndex(MI);
1726    if (pli.liveAt(Index))
1727      ++NumConflicts;
1728  }
1729  return NumConflicts;
1730}
1731
1732/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1733/// around all defs and uses of the specified interval.
1734void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1735                                            unsigned PhysReg, VirtRegMap &vrm) {
1736  unsigned SpillReg = getRepresentativeReg(PhysReg);
1737
1738  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1739    // If there are registers which alias PhysReg, but which are not a
1740    // sub-register of the chosen representative super register. Assert
1741    // since we can't handle it yet.
1742    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1743           tri_->isSuperRegister(*AS, SpillReg));
1744
1745  LiveInterval &pli = getInterval(SpillReg);
1746  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1747  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1748         E = mri_->reg_end(); I != E; ++I) {
1749    MachineOperand &O = I.getOperand();
1750    MachineInstr *MI = O.getParent();
1751    if (SeenMIs.count(MI))
1752      continue;
1753    SeenMIs.insert(MI);
1754    unsigned Index = getInstructionIndex(MI);
1755    if (pli.liveAt(Index)) {
1756      vrm.addEmergencySpill(SpillReg, MI);
1757      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1758      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1759        if (!hasInterval(*AS))
1760          continue;
1761        LiveInterval &spli = getInterval(*AS);
1762        if (spli.liveAt(Index))
1763          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1764      }
1765    }
1766  }
1767}
1768