LiveIntervalAnalysis.cpp revision fdc1706d009363c2e0008684b1813721f2d6e82a
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
namespace {
  // Hidden options for help debugging.

  // Globally disable rematerialization of trivially recomputable defs
  // (checked first thing in isReMaterializable below).
  cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);

  // Allow live intervals to be split at basic block boundaries.
  // NOTE(review): consumed outside this chunk -- confirm exact semantics at
  // the use sites.
  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  // Cap on interval splitting; -1 presumably means "no limit" -- confirm at
  // the use sites.
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}
49
// Statistics reported by this pass (-stats).
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification token used by the pass manager.
char LiveIntervals::ID = 0;
namespace {
  // Register the pass so it is selectable as -liveintervals.
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}
59
/// getAnalysisUsage - Declare pass dependencies: LiveVariables is both
/// required and preserved, and PHI elimination plus the two-address
/// lowering pass must have run before this analysis.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  // This analysis does not invalidate these earlier results.
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
76  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
/// runOnMachineFunction - Number the machine instructions and basic blocks
/// of the function, then compute live intervals for its virtual and
/// allocatable physical registers.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the subsystems used throughout the pass.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  // Assign each instruction an index, leaving a gap of InstrSlots::NUM
  // between consecutive instructions (getDefIndex/getUseIndex address the
  // sub-slots within that gap).
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Keep Idx2MBBMap sorted by start index so it can be binary-searched
  // (see findLiveInMBBs / intervalIsInOneMBB).
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
130
131/// print - Implement the dump method.
132void LiveIntervals::print(std::ostream &O, const Module* ) const {
133  O << "********** INTERVALS **********\n";
134  for (const_iterator I = begin(), E = end(); I != E; ++I) {
135    I->second.print(DOUT, tri_);
136    DOUT << "\n";
137  }
138
139  O << "********** MACHINEINSTRS **********\n";
140  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
141       mbbi != mbbe; ++mbbi) {
142    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
143    for (MachineBasicBlock::iterator mii = mbbi->begin(),
144           mie = mbbi->end(); mii != mie; ++mii) {
145      O << getInstructionIndex(mii) << '\t' << *mii;
146    }
147  }
148}
149
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction index covered by the interval's ranges.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      // A copy to or from the interval's own register is not a conflict.
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map assigned virtual registers to their physical register so
        // they can be compared against 'reg'; unassigned vregs can't
        // conflict yet.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        // Overlap (including aliases/sub-registers) means a conflict.
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
189
190void LiveIntervals::printRegName(unsigned reg) const {
191  if (TargetRegisterInfo::isPhysicalRegister(reg))
192    cerr << tri_->getName(reg);
193  else
194    cerr << "%reg" << reg;
195}
196
/// handleVirtualRegisterDef - Extend (or create) the live interval for the
/// virtual register defined by instruction mi at index MIIdx.  The first def
/// of a vreg builds the interval from LiveVariables' kill/alive-block info;
/// later defs of the same vreg are split into two-address redefinitions and
/// PHI-elimination copies.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record copy-like instructions on the value number so coalescing can
    // later recover the copy source.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
385
/// handlePhysicalRegisterDef - Add a live range for a physical register def
/// at MIIdx.  CopyMI, if non-null, is the copy-like instruction producing
/// the def (recorded on a newly created value number).
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // redefining instruction is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
446
447void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
448                                      MachineBasicBlock::iterator MI,
449                                      unsigned MIIdx,
450                                      unsigned reg) {
451  if (TargetRegisterInfo::isVirtualRegister(reg))
452    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
453  else if (allocatableRegs_[reg]) {
454    MachineInstr *CopyMI = NULL;
455    unsigned SrcReg, DstReg;
456    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457        tii_->isMoveInstr(*MI, SrcReg, DstReg))
458      CopyMI = MI;
459    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
460    // Def of a register also defines its sub-registers.
461    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
462      // If MI also modifies the sub-register explicitly, avoid processing it
463      // more than once. Do not pass in TRI here so it checks for exact match.
464      if (!MI->modifiesRegister(*AS))
465        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
466  }
467}
468
/// handleLiveInRegister - Create a live range for a register that is
/// live-in to MBB starting at index MIIdx.  isAlias is set when the
/// register is only live-in as a sub-register of another live-in register
/// (see computeIntervals).
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      // Live range ends just past the use slot of the killing instruction.
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // A never-read alias live-in is dead at the block entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Otherwise treat it as live through the whole block.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
516
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      // Sub-registers of a live-in get alias intervals unless another
      // live-in already created one for them.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      // NOTE(review): operands are walked in reverse order; presumably so
      // that processing is robust against operand-list changes made by the
      // def handlers -- confirm before changing.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}
560
561bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
562                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
563  std::vector<IdxMBBPair>::const_iterator I =
564    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
565
566  bool ResVal = false;
567  while (I != Idx2MBBMap.end()) {
568    if (LR.end <= I->first)
569      break;
570    MBBs.push_back(I->second);
571    ResVal = true;
572    ++I;
573  }
574  return ResVal;
575}
576
577
578LiveInterval LiveIntervals::createInterval(unsigned reg) {
579  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
580                       HUGE_VALF : 0.0F;
581  return LiveInterval(reg, Weight);
582}
583
584/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
585/// copy field and returns the source register that defines it.
586unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
587  if (!VNI->copy)
588    return 0;
589
590  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
591    return VNI->copy->getOperand(1).getReg();
592  unsigned SrcReg, DstReg;
593  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
594    return SrcReg;
595  assert(0 && "Unrecognized copy instruction!");
596  return 0;
597}
598
599//===----------------------------------------------------------------------===//
600// Register allocator hooks.
601//
602
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    // Only register *uses* are of interest.
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    // Skip non-registers and the interval's own register.
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
    // NOTE(review): because of this break, the assert above can never see a
    // second register operand -- confirm whether the early exit is intended.
    break;
  }
  return RegOp;
}
624
625/// isValNoAvailableAt - Return true if the val# of the specified interval
626/// which reaches the given instruction also reaches the specified use index.
627bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
628                                       unsigned UseIdx) const {
629  unsigned Index = getInstructionIndex(MI);
630  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
631  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
632  return UI != li.end() && UI->valno == ValNo;
633}
634
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  // Respect the hidden -disable-rematerialization flag.
  if (DisableReMat)
    return false;

  isLoad = false;
  // An IMPLICIT_DEF is trivially rematerializable.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    // If the def reads another virtual register, that register's value
    // must still be available at every use point where we would remat.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this particular val# matter.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}
678
679/// isReMaterializable - Returns true if every definition of MI of every
680/// val# of the specified interval is re-materializable.
681bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
682  isLoad = false;
683  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
684       i != e; ++i) {
685    const VNInfo *VNI = *i;
686    unsigned DefIdx = VNI->def;
687    if (DefIdx == ~1U)
688      continue; // Dead val#.
689    // Is the def for the val# rematerializable?
690    if (DefIdx == ~0u)
691      return false;
692    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
693    bool DefIsLoad = false;
694    if (!ReMatDefMI ||
695        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
696      return false;
697    isLoad |= DefIsLoad;
698  }
699  return true;
700}
701
702/// FilterFoldedOps - Filter out two-address use operands. Return
703/// true if it finds any issue with the operands that ought to prevent
704/// folding.
705static bool FilterFoldedOps(MachineInstr *MI,
706                            SmallVector<unsigned, 2> &Ops,
707                            unsigned &MRInfo,
708                            SmallVector<unsigned, 2> &FoldOps) {
709  const TargetInstrDesc &TID = MI->getDesc();
710
711  MRInfo = 0;
712  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
713    unsigned OpIdx = Ops[i];
714    MachineOperand &MO = MI->getOperand(OpIdx);
715    // FIXME: fold subreg use.
716    if (MO.getSubReg())
717      return true;
718    if (MO.isDef())
719      MRInfo |= (unsigned)VirtRegMap::isMod;
720    else {
721      // Filter out two-address use operand(s).
722      if (!MO.isImplicit() &&
723          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
724        MRInfo = VirtRegMap::isModRef;
725        continue;
726      }
727      MRInfo |= (unsigned)VirtRegMap::isRef;
728    }
729    FoldOps.push_back(OpIdx);
730  }
731  return false;
732}
733
734
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // Can't fold a load from fixed stack slot into a two address instruction.
  if (isSS && DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot or the remat def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Rewire the index maps so the new instruction owns MI's slot.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    // Splice the folded instruction in place of the old one and hand it
    // back through the by-reference MI parameter.
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
791
792/// canFoldMemoryOperand - Returns true if the specified load / store
793/// folding is possible.
794bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
795                                         SmallVector<unsigned, 2> &Ops,
796                                         bool ReMatLoad) const {
797  // Filter the list of operand indexes that are to be folded. Abort if
798  // any operand will prevent folding.
799  unsigned MRInfo = 0;
800  SmallVector<unsigned, 2> FoldOps;
801  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
802    return false;
803
804  // Can't fold a remat'ed load into a two address instruction.
805  if (ReMatLoad && (MRInfo & VirtRegMap::isMod))
806    return false;
807
808  return tii_->canFoldMemoryOperand(MI, FoldOps);
809}
810
811bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
812  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
813  for (LiveInterval::Ranges::const_iterator
814         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
815    std::vector<IdxMBBPair>::const_iterator II =
816      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
817    if (II == Idx2MBBMap.end())
818      continue;
819    if (I->end > II->first)  // crossing a MBB.
820      return false;
821    MBBs.insert(II->second);
822    if (MBBs.size() > 1)
823      return false;
824  }
825  return true;
826}
827
828/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
829/// interval on to-be re-materialized operands of MI) with new register.
830void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
831                                       MachineInstr *MI, unsigned NewVReg,
832                                       VirtRegMap &vrm) {
833  // There is an implicit use. That means one of the other operand is
834  // being remat'ed and the remat'ed instruction has li.reg as an
835  // use operand. Make sure we rewrite that as well.
836  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
837    MachineOperand &MO = MI->getOperand(i);
838    if (!MO.isRegister())
839      continue;
840    unsigned Reg = MO.getReg();
841    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
842      continue;
843    if (!vrm.isReMaterialized(Reg))
844      continue;
845    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
846    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
847    if (UseMO)
848      UseMO->setReg(NewVReg);
849  }
850}
851
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// This routine rewrites every operand of MI that refers to li.reg to use a
/// fresh virtual register (creating one and assigning it a stack slot or
/// remat id as needed), after first attempting to fold the reload / spill
/// directly into MI. Returns true if all rewritten operands were foldable.
/// NewVReg, HasDef and HasUse are in/out parameters reporting the register
/// chosen and whether MI defines and/or uses it.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  // Folding may rewrite MI in arbitrary ways, so the operand scan restarts
  // from operand 0 whenever a fold succeeds (see the goto below).
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    // Only virtual-register operands referring to li.reg are of interest.
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        unsigned ImpUse = getReMatImplicitUse(li, MI);
        if (ImpUse) {
          // To be deleted MI has a virtual register operand, update the
          // spill weight of the register interval.
          unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
          LiveInterval &ImpLi = getInterval(ImpUse);
          ImpLi.weight -=
            getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
        }
        // Remove MI from both the index maps and the function itself.
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    // Ops collects every operand index (from i onward) that refers to the
    // same register, so they can be folded / rewritten together.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        // While splitting, only record whether folding would be possible.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat && isLoad);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover from the load slot up through the use slot.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def live range runs from the def slot to the store slot.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1050bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1051                                   const VNInfo *VNI,
1052                                   MachineBasicBlock *MBB, unsigned Idx) const {
1053  unsigned End = getMBBEndIdx(MBB);
1054  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1055    unsigned KillIdx = VNI->kills[j];
1056    if (KillIdx > Idx && KillIdx < End)
1057      return true;
1058  }
1059  return false;
1060}
1061
1062static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1063  const VNInfo *VNI = NULL;
1064  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1065         e = li.vni_end(); i != e; ++i)
1066    if ((*i)->def == DefIdx) {
1067      VNI = *i;
1068      break;
1069    }
1070  return VNI;
1071}
1072
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling. One entry is created per (instruction, operand) pair
/// that refers to the register being spilled.
struct RewriteInfo {
  unsigned Index;    // Instruction index of MI in the numbering maps.
  MachineInstr *MI;  // Instruction whose operand will be rewritten.
  bool HasUse;       // This operand reads the register.
  bool HasDef;       // This operand writes the register.
  RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
};
1083
/// RewriteInfoCompare - Order RewriteInfo entries by ascending instruction
/// index so defs / uses are processed in program order. Entries for the same
/// instruction share an Index and therefore end up adjacent after sorting.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
  }
};
1089
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// the live range [I->start, I->end) for spilling or splitting. Collects the
/// affected instructions, rewrites them via rewriteInstructionForSpills, and
/// (when splitting) records per-MBB spill points in SpillMBBs/SpillIdxes and
/// reload points in RestoreMBBs/RestoreIdxes for later placement.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range boundaries down to whole-instruction slots.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &(*ri);
    MachineOperand &O = ri.getOperand();
    // Advance before the operand is rewritten out from under the iterator.
    ++ri;
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight.
      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight +=
        getSpillWeight(false, true, loopDepth) * NumUses / ImpLi.getSize();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already allocated for this MBB (if
      // any) so all of the block's defs / uses share one split interval.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing was rewritten (e.g. the remat def itself was deleted).
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // No later kill in this MBB: a spill after this def is needed.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1292
1293bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1294                        BitVector &RestoreMBBs,
1295                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1296  if (!RestoreMBBs[Id])
1297    return false;
1298  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1299  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1300    if (Restores[i].index == index &&
1301        Restores[i].vreg == vr &&
1302        Restores[i].canFold)
1303      return true;
1304  return false;
1305}
1306
1307void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1308                        BitVector &RestoreMBBs,
1309                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1310  if (!RestoreMBBs[Id])
1311    return;
1312  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1313  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1314    if (Restores[i].index == index && Restores[i].vreg)
1315      Restores[i].index = -1;
1316}
1317
1318
1319std::vector<LiveInterval*> LiveIntervals::
1320addIntervalsForSpills(const LiveInterval &li,
1321                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1322  // Since this is called after the analysis is done we don't know if
1323  // LiveVariables is available
1324  lv_ = getAnalysisToUpdate<LiveVariables>();
1325
1326  assert(li.weight != HUGE_VALF &&
1327         "attempt to spill already spilled interval!");
1328
1329  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1330  li.print(DOUT, tri_);
1331  DOUT << '\n';
1332
1333  // Each bit specify whether it a spill is required in the MBB.
1334  BitVector SpillMBBs(mf_->getNumBlockIDs());
1335  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1336  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1337  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1338  std::map<unsigned,unsigned> MBBVRegsMap;
1339  std::vector<LiveInterval*> NewLIs;
1340  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1341
1342  unsigned NumValNums = li.getNumValNums();
1343  SmallVector<MachineInstr*, 4> ReMatDefs;
1344  ReMatDefs.resize(NumValNums, NULL);
1345  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1346  ReMatOrigDefs.resize(NumValNums, NULL);
1347  SmallVector<int, 4> ReMatIds;
1348  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1349  BitVector ReMatDelete(NumValNums);
1350  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1351
1352  // Spilling a split live interval. It cannot be split any further. Also,
1353  // it's also guaranteed to be a single val# / range interval.
1354  if (vrm.getPreSplitReg(li.reg)) {
1355    vrm.setIsSplitFromReg(li.reg, 0);
1356    // Unset the split kill marker on the last use.
1357    unsigned KillIdx = vrm.getKillPoint(li.reg);
1358    if (KillIdx) {
1359      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1360      assert(KillMI && "Last use disappeared?");
1361      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1362      assert(KillOp != -1 && "Last use disappeared?");
1363      KillMI->getOperand(KillOp).setIsKill(false);
1364    }
1365    vrm.removeKillPoint(li.reg);
1366    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1367    Slot = vrm.getStackSlot(li.reg);
1368    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1369    MachineInstr *ReMatDefMI = DefIsReMat ?
1370      vrm.getReMaterializedMI(li.reg) : NULL;
1371    int LdSlot = 0;
1372    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1373    bool isLoad = isLoadSS ||
1374      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1375    bool IsFirstRange = true;
1376    for (LiveInterval::Ranges::const_iterator
1377           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1378      // If this is a split live interval with multiple ranges, it means there
1379      // are two-address instructions that re-defined the value. Only the
1380      // first def can be rematerialized!
1381      if (IsFirstRange) {
1382        // Note ReMatOrigDefMI has already been deleted.
1383        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1384                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1385                             false, vrm, rc, ReMatIds, loopInfo,
1386                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1387                             MBBVRegsMap, NewLIs);
1388      } else {
1389        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1390                             Slot, 0, false, false, false,
1391                             false, vrm, rc, ReMatIds, loopInfo,
1392                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1393                             MBBVRegsMap, NewLIs);
1394      }
1395      IsFirstRange = false;
1396    }
1397    return NewLIs;
1398  }
1399
1400  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1401  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1402    TrySplit = false;
1403  if (TrySplit)
1404    ++numSplits;
1405  bool NeedStackSlot = false;
1406  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1407       i != e; ++i) {
1408    const VNInfo *VNI = *i;
1409    unsigned VN = VNI->id;
1410    unsigned DefIdx = VNI->def;
1411    if (DefIdx == ~1U)
1412      continue; // Dead val#.
1413    // Is the def for the val# rematerializable?
1414    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1415      ? 0 : getInstructionFromIndex(DefIdx);
1416    bool dummy;
1417    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1418      // Remember how to remat the def of this val#.
1419      ReMatOrigDefs[VN] = ReMatDefMI;
1420      // Original def may be modified so we have to make a copy here. vrm must
1421      // delete these!
1422      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1423
1424      bool CanDelete = true;
1425      if (VNI->hasPHIKill) {
1426        // A kill is a phi node, not all of its uses can be rematerialized.
1427        // It must not be deleted.
1428        CanDelete = false;
1429        // Need a stack slot if there is any live range where uses cannot be
1430        // rematerialized.
1431        NeedStackSlot = true;
1432      }
1433      if (CanDelete)
1434        ReMatDelete.set(VN);
1435    } else {
1436      // Need a stack slot if there is any live range where uses cannot be
1437      // rematerialized.
1438      NeedStackSlot = true;
1439    }
1440  }
1441
1442  // One stack slot per live interval.
1443  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1444    Slot = vrm.assignVirt2StackSlot(li.reg);
1445
1446  // Create new intervals and rewrite defs and uses.
1447  for (LiveInterval::Ranges::const_iterator
1448         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1449    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1450    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1451    bool DefIsReMat = ReMatDefMI != NULL;
1452    bool CanDelete = ReMatDelete[I->valno->id];
1453    int LdSlot = 0;
1454    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1455    bool isLoad = isLoadSS ||
1456      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1457    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1458                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1459                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1460                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1461                               MBBVRegsMap, NewLIs);
1462  }
1463
1464  // Insert spills / restores if we are splitting.
1465  if (!TrySplit)
1466    return NewLIs;
1467
1468  SmallPtrSet<LiveInterval*, 4> AddedKill;
1469  SmallVector<unsigned, 2> Ops;
1470  if (NeedStackSlot) {
1471    int Id = SpillMBBs.find_first();
1472    while (Id != -1) {
1473      std::vector<SRInfo> &spills = SpillIdxes[Id];
1474      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1475        int index = spills[i].index;
1476        unsigned VReg = spills[i].vreg;
1477        LiveInterval &nI = getOrCreateInterval(VReg);
1478        bool isReMat = vrm.isReMaterialized(VReg);
1479        MachineInstr *MI = getInstructionFromIndex(index);
1480        bool CanFold = false;
1481        bool FoundUse = false;
1482        Ops.clear();
1483        if (spills[i].canFold) {
1484          CanFold = true;
1485          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1486            MachineOperand &MO = MI->getOperand(j);
1487            if (!MO.isRegister() || MO.getReg() != VReg)
1488              continue;
1489
1490            Ops.push_back(j);
1491            if (MO.isDef())
1492              continue;
1493            if (isReMat ||
1494                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1495                                                RestoreMBBs, RestoreIdxes))) {
1496              // MI has two-address uses of the same register. If the use
1497              // isn't the first and only use in the BB, then we can't fold
1498              // it. FIXME: Move this to rewriteInstructionsForSpills.
1499              CanFold = false;
1500              break;
1501            }
1502            FoundUse = true;
1503          }
1504        }
1505        // Fold the store into the def if possible.
1506        bool Folded = false;
1507        if (CanFold && !Ops.empty()) {
1508          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1509            Folded = true;
1510            if (FoundUse > 0) {
1511              // Also folded uses, do not issue a load.
1512              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1513              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1514            }
1515            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1516          }
1517        }
1518
1519        // Else tell the spiller to issue a spill.
1520        if (!Folded) {
1521          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1522          bool isKill = LR->end == getStoreIndex(index);
1523          vrm.addSpillPoint(VReg, isKill, MI);
1524          if (isKill)
1525            AddedKill.insert(&nI);
1526        }
1527      }
1528      Id = SpillMBBs.find_next(Id);
1529    }
1530  }
1531
1532  int Id = RestoreMBBs.find_first();
1533  while (Id != -1) {
1534    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1535    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1536      int index = restores[i].index;
1537      if (index == -1)
1538        continue;
1539      unsigned VReg = restores[i].vreg;
1540      LiveInterval &nI = getOrCreateInterval(VReg);
1541      MachineInstr *MI = getInstructionFromIndex(index);
1542      bool CanFold = false;
1543      Ops.clear();
1544      if (restores[i].canFold) {
1545        CanFold = true;
1546        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1547          MachineOperand &MO = MI->getOperand(j);
1548          if (!MO.isRegister() || MO.getReg() != VReg)
1549            continue;
1550
1551          if (MO.isDef()) {
1552            // If this restore were to be folded, it would have been folded
1553            // already.
1554            CanFold = false;
1555            break;
1556          }
1557          Ops.push_back(j);
1558        }
1559      }
1560
1561      // Fold the load into the use if possible.
1562      bool Folded = false;
1563      if (CanFold && !Ops.empty()) {
1564        if (!vrm.isReMaterialized(VReg))
1565          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1566        else {
1567          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1568          int LdSlot = 0;
1569          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1570          // If the rematerializable def is a load, also try to fold it.
1571          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1572            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1573                                          Ops, isLoadSS, LdSlot, VReg);
1574          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1575          if (ImpUse) {
1576            // Re-matting an instruction with virtual register use. Add the
1577            // register as an implicit use on the use MI and update the register
1578            // interval's spill weight.
1579            unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1580            LiveInterval &ImpLi = getInterval(ImpUse);
1581            ImpLi.weight +=
1582              getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
1583
1584            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1585          }
1586        }
1587      }
1588      // If folding is not possible / failed, then tell the spiller to issue a
1589      // load / rematerialization for us.
1590      if (Folded)
1591        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1592      else
1593        vrm.addRestorePoint(VReg, MI);
1594    }
1595    Id = RestoreMBBs.find_next(Id);
1596  }
1597
1598  // Finalize intervals: add kills, finalize spill weights, and filter out
1599  // dead intervals.
1600  std::vector<LiveInterval*> RetNewLIs;
1601  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1602    LiveInterval *LI = NewLIs[i];
1603    if (!LI->empty()) {
1604      LI->weight /= LI->getSize();
1605      if (!AddedKill.count(LI)) {
1606        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1607        unsigned LastUseIdx = getBaseIndex(LR->end);
1608        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1609        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1610        assert(UseIdx != -1);
1611        if (LastUse->getOperand(UseIdx).isImplicit() ||
1612            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1613          LastUse->getOperand(UseIdx).setIsKill();
1614          vrm.addKillPoint(LI->reg, LastUseIdx);
1615        }
1616      }
1617      RetNewLIs.push_back(LI);
1618    }
1619  }
1620
1621  return RetNewLIs;
1622}
1623
1624/// hasAllocatableSuperReg - Return true if the specified physical register has
1625/// any super register that's allocatable.
1626bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1627  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1628    if (allocatableRegs_[*AS] && hasInterval(*AS))
1629      return true;
1630  return false;
1631}
1632
1633/// getRepresentativeReg - Find the largest super register of the specified
1634/// physical register.
1635unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1636  // Find the largest super-register that is allocatable.
1637  unsigned BestReg = Reg;
1638  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1639    unsigned SuperReg = *AS;
1640    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1641      BestReg = SuperReg;
1642      break;
1643    }
1644  }
1645  return BestReg;
1646}
1647
1648/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1649/// specified interval that conflicts with the specified physical register.
1650unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1651                                                   unsigned PhysReg) const {
1652  unsigned NumConflicts = 0;
1653  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1654  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1655         E = mri_->reg_end(); I != E; ++I) {
1656    MachineOperand &O = I.getOperand();
1657    MachineInstr *MI = O.getParent();
1658    unsigned Index = getInstructionIndex(MI);
1659    if (pli.liveAt(Index))
1660      ++NumConflicts;
1661  }
1662  return NumConflicts;
1663}
1664
1665/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1666/// around all defs and uses of the specified interval.
1667void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1668                                            unsigned PhysReg, VirtRegMap &vrm) {
1669  unsigned SpillReg = getRepresentativeReg(PhysReg);
1670
1671  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1672    // If there are registers which alias PhysReg, but which are not a
1673    // sub-register of the chosen representative super register. Assert
1674    // since we can't handle it yet.
1675    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1676           tri_->isSuperRegister(*AS, SpillReg));
1677
1678  LiveInterval &pli = getInterval(SpillReg);
1679  SmallPtrSet<MachineInstr*, 8> SeenMIs;
1680  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1681         E = mri_->reg_end(); I != E; ++I) {
1682    MachineOperand &O = I.getOperand();
1683    MachineInstr *MI = O.getParent();
1684    if (SeenMIs.count(MI))
1685      continue;
1686    SeenMIs.insert(MI);
1687    unsigned Index = getInstructionIndex(MI);
1688    if (pli.liveAt(Index)) {
1689      vrm.addEmergencySpill(SpillReg, MI);
1690      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1691      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1692        if (!hasInterval(*AS))
1693          continue;
1694        LiveInterval &spli = getInterval(*AS);
1695        if (spli.liveAt(Index))
1696          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1697      }
1698    }
1699  }
1700}
1701