LiveIntervalAnalysis.cpp revision c95f075c43488234e44b1213dfd762ae0a7509ea
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

namespace {
  // Hidden options for help debugging.
  cl::opt<bool> DisableReMat("disable-rematerialization",
                             cl::init(false), cl::Hidden);

  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                          cl::init(true), cl::Hidden);
  cl::opt<int> SplitLimit("split-limit",
                          cl::init(-1), cl::Hidden);
}

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
namespace {
  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
}

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
    delete ClonedMIs[i];
}

namespace llvm {
  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
    return V < IM.first;
  }

  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
    return IM.first < V;
  }

  struct Idx2MBBCompare {
    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
      return LHS.first < RHS.first;
    }
  };
}

/// runOnMachineFunction - Number the machine instructions and compute live
/// intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  tm_ = &fn.getTarget();
  mri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = mri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

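  // Each MachineInstr is allotted InstrSlots::NUM consecutive index slots so
  // that the load / use / def / store points within a single instruction can
  // be distinguished (see getLoadIndex, getUseIndex, getDefIndex and
  // getStoreIndex).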
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, mri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, mri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
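      // A copy to or from li.reg itself does not count as a conflict.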
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (MRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && mri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (MRegisterInfo::isPhysicalRegister(reg))
    cerr << mri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    unsigned SrcReg, DstReg;
    if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
      ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
    else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
      ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
                                    VNInfoAllocator);
    else
      ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      MachineRegisterInfo& MRI = mbb->getParent()->getRegInfo();
      unsigned lowIndex = ~0U;
      for (MachineRegisterInfo::def_iterator DI = MRI.def_begin(interval.reg),
          DE = MRI.def_end(); DI != DE; ++DI)
        if (getInstructionIndex(&*DI) < lowIndex)
          lowIndex = getInstructionIndex(&*DI);

      unsigned DefIndex = getDefIndex(lowIndex);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;
      unsigned OldEnd = OldLR->end;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(0, 0, VNInfoAllocator);
      interval.copyValNumInfo(ValNo, OldValNo);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->reg = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);
      interval.removeKills(ValNo, RedefIndex, OldEnd);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, mri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, mri_); DOUT << "\n";
        interval.removeRange(Start, End);
        interval.addKill(VNI, Start);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, mri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, mri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
        ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
      else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
        ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
                                      VNInfoAllocator);
      else
        ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              unsigned SrcReg) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or an instruction where we know it's dead, is if it is
  // live-in to the function and never used.
  assert(!SrcReg && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, SrcReg, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      unsigned reg) {
  if (MRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
      SrcReg = MI->getOperand(1).getReg();
    else if (!tii_->isMoveInstr(*MI, SrcReg, DstReg))
      SrcReg = 0;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), SrcReg);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = mri_->getSubRegisters(reg); *AS; ++AS)
      // Avoid processing some defs more than once.
      if (!MI->findRegisterDefOperand(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it is killed, then it
  // shouldn't be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = mri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
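  // Idx2MBBMap is sorted by start index (see runOnMachineFunction), so a
  // binary search finds the first basic block that may overlap LR.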
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}


LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}


//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  const TargetInstrDesc &TID = MI->getDesc();
  if (TID.isImplicitDef() || tii_->isTriviallyReMaterializable(MI)) {
    isLoad = TID.isSimpleLoad();
    return true;
  }

  int FrameIdx = 0;
  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
    return false;

  // This is a load from a fixed stack slot. It can be rematerialized unless
  // it's re-defined by a two-address instruction.
  isLoad = true;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI == ValNo)
      continue;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    MachineInstr *DefMI = (DefIdx == ~0u)
      ? NULL : getInstructionFromIndex(DefIdx);
    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
      isLoad = false;
      return false;
    }
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from / to
/// a stack slot or a rematerialized load into the specified operands of MI.
/// If it is successful, MI is updated with the newly created instruction and
/// true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  unsigned MRInfo = 0;
  const TargetInstrDesc &TID = MI->getDesc();
  // If it is an implicit def instruction, just delete it.
  if (TID.isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    // FIXME: fold subreg use.
    if (MI->getOperand(OpIdx).getSubReg())
      return false;
    if (MI->getOperand(OpIdx).isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops) const {
  SmallVector<unsigned, 2> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    // FIXME: fold subreg use.
    if (MI->getOperand(OpIdx).getSubReg())
      return false;
    FoldOps.push_back(OpIdx);
  }

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

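/// intervalIsInOneMBB - Return true if all of the ranges of the given live
/// interval are contained within a single basic block.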
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
                 const MachineLoopInfo *loopInfo,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        CanFold = canFoldMemoryOperand(MI, Ops);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = RegInfo.createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
      MI->getOperand(Ops[j]).setReg(NewVReg);

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, mri_);
    DOUT << '\n';
  }
  return CanFold;
}
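
/// anyKillInMBBAfterIdx - Return true if the specified val# has a kill in MBB
/// that occurs after the given index and before the end of the basic block.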
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

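/// findDefinedVNInfo - Return the val# of the specified live interval that is
/// defined at index DefIdx, or NULL if no value number is defined there.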
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
      VNI = *i;
      break;
    }
  return VNI;
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned index = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
  for (; index != end; index += InstrSlots::NUM) {
    // skip deleted instructions
    while (index != end && !getInstructionFromIndex(index))
      index += InstrSlots::NUM;
    if (index == end) break;

    MachineInstr *MI = getInstructionFromIndex(index);
    MachineBasicBlock *MBB = MI->getParent();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      std::map<unsigned,unsigned>::const_iterator NVI =
        MBBVRegsMap.find(MBB->getNumber());
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
        bool MIHasUse = false;
        bool MIHasDef = false;
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& mop = MI->getOperand(i);
          if (!mop.isRegister() || mop.getReg() != li.reg)
            continue;
          if (mop.isUse())
            MIHasUse = true;
          else
            MIHasDef = true;
        }
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    unsigned MBBId = MBB->getNumber();
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                        BitVector &RestoreMBBs,
                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}


std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available.
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, mri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
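  // For each basic block number, SpillIdxes / RestoreIdxes record the
  // instruction indexes (and new vregs) at which spills / restores need to
  // be inserted.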
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  MachineRegisterInfo &RegInfo = mf_->getRegInfo();
  const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit)
    return NewLIs;

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse > 0) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Else tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
        assert(UseIdx != -1);
        if (LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) ==
            -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  return RetNewLIs;
}