LiveIntervalAnalysis.cpp revision e984e504b5f3090ab270cbdab02638ac3a2afb21
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/TargetRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
39namespace {
40  // Hidden options for help debugging.
41  cl::opt<bool> DisableReMat("disable-rematerialization",
42                              cl::init(false), cl::Hidden);
43
44  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45                          cl::init(true), cl::Hidden);
46  cl::opt<int> SplitLimit("split-limit",
47                          cl::init(-1), cl::Hidden);
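  // SplitAtBB enables splitting live intervals at basic block boundaries in
  // addIntervalsForSpills; SplitLimit, when non-negative, caps the total
  // number of splits performed.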
48}
49
50STATISTIC(numIntervals, "Number of original intervals");
51STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
53STATISTIC(numSplits   , "Number of intervals split");
54
55char LiveIntervals::ID = 0;
56namespace {
57  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
58}
59
60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61  AU.addPreserved<LiveVariables>();
62  AU.addRequired<LiveVariables>();
63  AU.addPreservedID(MachineLoopInfoID);
64  AU.addPreservedID(MachineDominatorsID);
65  AU.addPreservedID(PHIEliminationID);
66  AU.addRequiredID(PHIEliminationID);
67  AU.addRequiredID(TwoAddressInstructionPassID);
68  MachineFunctionPass::getAnalysisUsage(AU);
69}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
76  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
82namespace llvm {
83  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
84    return V < IM.first;
85  }
86
87  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
88    return IM.first < V;
89  }
90
91  struct Idx2MBBCompare {
92    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
93      return LHS.first < RHS.first;
94    }
95  };
96}
97
98/// runOnMachineFunction - Number the instructions and compute live
99/// intervals for the whole function.
100bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
101  mf_ = &fn;
102  tm_ = &fn.getTarget();
103  tri_ = tm_->getRegisterInfo();
104  tii_ = tm_->getInstrInfo();
105  lv_ = &getAnalysis<LiveVariables>();
106  allocatableRegs_ = tri_->getAllocatableSet(fn);
107
108  // Number MachineInstrs and MachineBasicBlocks.
109  // Initialize MBB indexes to a sentinel.
110  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
111
112  unsigned MIIndex = 0;
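  // Each MachineInstr is allotted InstrSlots::NUM consecutive indices so that
  // sub-instruction points can be addressed (via getLoadIndex, getUseIndex,
  // getDefIndex and getStoreIndex) without renumbering the whole function.
  // For example, if InstrSlots::NUM were 4, the instruction at base index 8
  // would own indices 8 through 11.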
113  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
114       MBB != E; ++MBB) {
115    unsigned StartIdx = MIIndex;
116
117    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
118         I != E; ++I) {
119      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
120      assert(inserted && "multiple MachineInstr -> index mappings");
121      i2miMap_.push_back(I);
122      MIIndex += InstrSlots::NUM;
123    }
124
125    // Set the MBB2IdxMap entry for this MBB.
126    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
127    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
128  }
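  // Keep Idx2MBBMap sorted by start index so that queries such as
  // findLiveInMBBs and intervalIsInOneMBB can binary search it with
  // std::lower_bound using the operator< overloads defined above.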
129  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
130
131  computeIntervals();
132
133  numIntervals += getNumIntervals();
134
135  DOUT << "********** INTERVALS **********\n";
136  for (iterator I = begin(), E = end(); I != E; ++I) {
137    I->second.print(DOUT, tri_);
138    DOUT << "\n";
139  }
140
141  numIntervalsAfter += getNumIntervals();
142  DEBUG(dump());
143  return true;
144}
145
146/// print - Implement the dump method.
147void LiveIntervals::print(std::ostream &O, const Module* ) const {
148  O << "********** INTERVALS **********\n";
149  for (const_iterator I = begin(), E = end(); I != E; ++I) {
150    I->second.print(O, tri_);
151    O << "\n";
152  }
153
154  O << "********** MACHINEINSTRS **********\n";
155  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
156       mbbi != mbbe; ++mbbi) {
157    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
158    for (MachineBasicBlock::iterator mii = mbbi->begin(),
159           mie = mbbi->end(); mii != mie; ++mii) {
160      O << getInstructionIndex(mii) << '\t' << *mii;
161    }
162  }
163}
164
165/// conflictsWithPhysRegDef - Returns true if the specified register
166/// is defined during the duration of the specified interval.
167bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
168                                            VirtRegMap &vrm, unsigned reg) {
169  for (LiveInterval::Ranges::const_iterator
170         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
171    for (unsigned index = getBaseIndex(I->start),
172           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
173         index += InstrSlots::NUM) {
174      // skip deleted instructions
175      while (index != end && !getInstructionFromIndex(index))
176        index += InstrSlots::NUM;
177      if (index == end) break;
178
179      MachineInstr *MI = getInstructionFromIndex(index);
180      unsigned SrcReg, DstReg;
181      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
182        if (SrcReg == li.reg || DstReg == li.reg)
183          continue;
184      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
185        MachineOperand& mop = MI->getOperand(i);
186        if (!mop.isRegister())
187          continue;
188        unsigned PhysReg = mop.getReg();
189        if (PhysReg == 0 || PhysReg == li.reg)
190          continue;
191        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
192          if (!vrm.hasPhys(PhysReg))
193            continue;
194          PhysReg = vrm.getPhys(PhysReg);
195        }
196        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
197          return true;
198      }
199    }
200  }
201
202  return false;
203}
204
205void LiveIntervals::printRegName(unsigned reg) const {
206  if (TargetRegisterInfo::isPhysicalRegister(reg))
207    cerr << tri_->getName(reg);
208  else
209    cerr << "%reg" << reg;
210}
211
212void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
213                                             MachineBasicBlock::iterator mi,
214                                             unsigned MIIdx,
215                                             LiveInterval &interval) {
216  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
217  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
218
219  // Virtual registers may be defined multiple times (due to phi
220  // elimination and 2-addr elimination).  Much of what we do only has to be
221  // done once for the vreg.  We use an empty interval to detect the first
222  // time we see a vreg.
223  if (interval.empty()) {
224    // Get the Idx of the defining instruction.
225    unsigned defIndex = getDefIndex(MIIdx);
226    VNInfo *ValNo;
227    unsigned SrcReg, DstReg;
228    if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
229      ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
230    else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
231      ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
232                                    VNInfoAllocator);
233    else
234      ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
235
236    assert(ValNo->id == 0 && "First value in interval is not 0?");
237
238    // Loop over all of the blocks that the vreg is defined in.  There are
239    // two cases we have to handle here.  The most common case is a vreg
240    // whose lifetime is contained within a basic block.  In this case there
241    // will be a single kill, in MBB, which comes after the definition.
242    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
243      // FIXME: what about dead vars?
244      unsigned killIdx;
245      if (vi.Kills[0] != mi)
246        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
247      else
248        killIdx = defIndex+1;
249
250      // If the kill happens after the definition, we have an intra-block
251      // live range.
252      if (killIdx > defIndex) {
253        assert(vi.AliveBlocks.none() &&
254               "Shouldn't be alive across any blocks!");
255        LiveRange LR(defIndex, killIdx, ValNo);
256        interval.addRange(LR);
257        DOUT << " +" << LR << "\n";
258        interval.addKill(ValNo, killIdx);
259        return;
260      }
261    }
262
263    // The other case we handle is when a virtual register lives to the end
264    // of the defining block, potentially live across some blocks, then is
265    // live into some number of blocks, but gets killed.  Start by adding a
266    // range that goes from this definition to the end of the defining block.
267    LiveRange NewLR(defIndex,
268                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
269                    ValNo);
270    DOUT << " +" << NewLR;
271    interval.addRange(NewLR);
272
273    // Iterate over all of the blocks that the variable is completely
274    // live in, adding [instrIndex(begin), instrIndex(end) + InstrSlots::NUM)
275    // to the live interval.
276    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
277      if (vi.AliveBlocks[i]) {
278        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
279        if (!MBB->empty()) {
280          LiveRange LR(getMBBStartIdx(i),
281                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
282                       ValNo);
283          interval.addRange(LR);
284          DOUT << " +" << LR;
285        }
286      }
287    }
288
289    // Finally, this virtual register is live from the start of any killing
290    // block to the 'use' slot of the killing instruction.
291    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
292      MachineInstr *Kill = vi.Kills[i];
293      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
294      LiveRange LR(getMBBStartIdx(Kill->getParent()),
295                   killIdx, ValNo);
296      interval.addRange(LR);
297      interval.addKill(ValNo, killIdx);
298      DOUT << " +" << LR;
299    }
300
301  } else {
302    // If this is the second time we see a virtual register definition, it
303    // must be due to phi elimination or two addr elimination.  If this is
304    // the result of two address elimination, then the vreg is one of the
305    // def-and-use register operands.
306    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
307      // If this is a two-address definition, then we have already processed
308      // the live range.  The only problem is that we didn't realize there
309      // are actually two values in the live interval.  Because of this we
310      // need to take the LiveRange that defines this register and split it
311      // into two values.
312      assert(interval.containsOneValue());
313      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
314      unsigned RedefIndex = getDefIndex(MIIdx);
315
316      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
317      VNInfo *OldValNo = OldLR->valno;
318
319      // Delete the initial value, which should be short and continuous,
320      // because the 2-addr copy must be in the same MBB as the redef.
321      interval.removeRange(DefIndex, RedefIndex);
322
323      // Two-address vregs should always only be redefined once.  This means
324      // that at this point, there should be exactly one value number in it.
325      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
326
327      // The new value number (#1) is defined by the instruction we claimed
328      // defined value #0.
329      VNInfo *ValNo = interval.getNextValue(0, 0, VNInfoAllocator);
330      ValNo->def = OldValNo->def;
331      ValNo->reg = OldValNo->reg;
332
333      // Value#0 is now defined by the 2-addr instruction.
334      OldValNo->def = RedefIndex;
335      OldValNo->reg = 0;
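      // For example, given
      //   vreg = copy SRC     (the input copy, originally val#0, now val#1)
      //   vreg = vreg op ...  (the two-address redef, now val#0)
      // the ranges following the redef already refer to val#0 and do not need
      // to be rewritten.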
336
337      // Add the new live interval which replaces the range for the input copy.
338      LiveRange LR(DefIndex, RedefIndex, ValNo);
339      DOUT << " replace range with " << LR;
340      interval.addRange(LR);
341      interval.addKill(ValNo, RedefIndex);
342
343      // If this redefinition is dead, we need to add a dummy unit live
344      // range covering the def slot.
345      if (lv_->RegisterDefIsDead(mi, interval.reg))
346        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
347
348      DOUT << " RESULT: ";
349      interval.print(DOUT, tri_);
350
351    } else {
352      // Otherwise, this must be because of phi elimination.  If this is the
353      // first redefinition of the vreg that we have seen, go back and change
354      // the live range in the PHI block to be a different value number.
355      if (interval.containsOneValue()) {
356        assert(vi.Kills.size() == 1 &&
357               "PHI elimination vreg should have one kill, the PHI itself!");
358
359        // Remove the old range that we now know has an incorrect number.
360        VNInfo *VNI = interval.getValNumInfo(0);
361        MachineInstr *Killer = vi.Kills[0];
362        unsigned Start = getMBBStartIdx(Killer->getParent());
363        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
364        DOUT << " Removing [" << Start << "," << End << "] from: ";
365        interval.print(DOUT, tri_); DOUT << "\n";
366        interval.removeRange(Start, End);
367        interval.addKill(VNI, Start);
368        VNI->hasPHIKill = true;
369        DOUT << " RESULT: "; interval.print(DOUT, tri_);
370
371        // Replace the interval with one of a NEW value number.  Note that this
372        // value number isn't actually defined by an instruction, weird huh? :)
373        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
374        DOUT << " replace range with " << LR;
375        interval.addRange(LR);
376        interval.addKill(LR.valno, End);
377        DOUT << " RESULT: "; interval.print(DOUT, tri_);
378      }
379
380      // In the case of PHI elimination, each variable definition is only
381      // live until the end of the block.  We've already taken care of the
382      // rest of the live range.
383      unsigned defIndex = getDefIndex(MIIdx);
384
385      VNInfo *ValNo;
386      unsigned SrcReg, DstReg;
387      if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
388        ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
389      else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
390        ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
391                                      VNInfoAllocator);
392      else
393        ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
394
395      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
396      LiveRange LR(defIndex, killIndex, ValNo);
397      interval.addRange(LR);
398      interval.addKill(ValNo, killIndex);
399      ValNo->hasPHIKill = true;
400      DOUT << " +" << LR;
401    }
402  }
403
404  DOUT << '\n';
405}
406
407void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
408                                              MachineBasicBlock::iterator mi,
409                                              unsigned MIIdx,
410                                              LiveInterval &interval,
411                                              unsigned SrcReg) {
412  // A physical register cannot be live across basic blocks, so its
413  // lifetime must end somewhere in its defining basic block.
414  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
415
416  unsigned baseIndex = MIIdx;
417  unsigned start = getDefIndex(baseIndex);
418  unsigned end = start;
419
420  // If it is not used after definition, it is considered dead at
421  // the instruction defining it. Hence its interval is:
422  // [defSlot(def), defSlot(def)+1)
423  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
424    DOUT << " dead";
425    end = getDefIndex(start) + 1;
426    goto exit;
427  }
428
429  // If it is not dead on definition, it must be killed by a
430  // subsequent instruction. Hence its interval is:
431  // [defSlot(def), useSlot(kill)+1)
432  while (++mi != MBB->end()) {
433    baseIndex += InstrSlots::NUM;
434    if (lv_->KillsRegister(mi, interval.reg)) {
435      DOUT << " killed";
436      end = getUseIndex(baseIndex) + 1;
437      goto exit;
438    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
439      // Another instruction redefines the register before it is ever read.
440      // Then the register is essentially dead at the instruction that defines
441      // it. Hence its interval is:
442      // [defSlot(def), defSlot(def)+1)
443      DOUT << " dead";
444      end = getDefIndex(start) + 1;
445      goto exit;
446    }
447  }
448
449  // The only case where we should see a dead physreg here without a
450  // killing instruction (or one where we otherwise know it's dead) is if
451  // it is live-in to the function and never used.
452  assert(!SrcReg && "physreg was not killed in defining block!");
453  end = getDefIndex(start) + 1;  // It's dead.
454
455exit:
456  assert(start < end && "did not find end of interval?");
457
458  // Already exists? Extend old live interval.
459  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
460  VNInfo *ValNo = (OldLR != interval.end())
461    ? OldLR->valno : interval.getNextValue(start, SrcReg, VNInfoAllocator);
462  LiveRange LR(start, end, ValNo);
463  interval.addRange(LR);
464  interval.addKill(LR.valno, end);
465  DOUT << " +" << LR << '\n';
466}
467
468void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
469                                      MachineBasicBlock::iterator MI,
470                                      unsigned MIIdx,
471                                      unsigned reg) {
472  if (TargetRegisterInfo::isVirtualRegister(reg))
473    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
474  else if (allocatableRegs_[reg]) {
475    unsigned SrcReg, DstReg;
476    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
477      SrcReg = MI->getOperand(1).getReg();
478    else if (!tii_->isMoveInstr(*MI, SrcReg, DstReg))
479      SrcReg = 0;
480    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), SrcReg);
481    // Def of a register also defines its sub-registers.
482    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
483      // Avoid processing some defs more than once.
484      if (!MI->findRegisterDefOperand(*AS))
485        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
486  }
487}
488
489void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
490                                         unsigned MIIdx,
491                                         LiveInterval &interval, bool isAlias) {
492  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
493
494  // Look for kills, if it reaches a def before it's killed, then it shouldn't
495  // be considered a livein.
496  MachineBasicBlock::iterator mi = MBB->begin();
497  unsigned baseIndex = MIIdx;
498  unsigned start = baseIndex;
499  unsigned end = start;
500  while (mi != MBB->end()) {
501    if (lv_->KillsRegister(mi, interval.reg)) {
502      DOUT << " killed";
503      end = getUseIndex(baseIndex) + 1;
504      goto exit;
505    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
506      // Another instruction redefines the register before it is ever read.
507      // Then the register is essentially dead at the instruction that defines
508      // it. Hence its interval is:
509      // [defSlot(def), defSlot(def)+1)
510      DOUT << " dead";
511      end = getDefIndex(start) + 1;
512      goto exit;
513    }
514
515    baseIndex += InstrSlots::NUM;
516    ++mi;
517  }
518
519exit:
520  // Live-in register might not be used at all.
521  if (end == MIIdx) {
522    if (isAlias) {
523      DOUT << " dead";
524      end = getDefIndex(MIIdx) + 1;
525    } else {
526      DOUT << " live through";
527      end = baseIndex;
528    }
529  }
530
531  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
532  interval.addRange(LR);
533  interval.addKill(LR.valno, end);
534  DOUT << " +" << LR << '\n';
535}
536
537/// computeIntervals - Computes the live intervals for virtual
538/// registers. For some ordering of the machine instructions [1,N], a
539/// live interval is an interval [i, j) where 1 <= i <= j < N for
540/// which a variable is live.
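/// Intervals are half-open: for example, a value defined at instruction i and
/// last used at instruction j is represented as [defSlot(i), useSlot(j)+1).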
541void LiveIntervals::computeIntervals() {
542  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
543       << "********** Function: "
544       << ((Value*)mf_->getFunction())->getName() << '\n';
545  // Track the index of the current machine instr.
546  unsigned MIIndex = 0;
547  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
548       MBBI != E; ++MBBI) {
549    MachineBasicBlock *MBB = MBBI;
550    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
551
552    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
553
554    // Create intervals for live-ins to this BB first.
555    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
556           LE = MBB->livein_end(); LI != LE; ++LI) {
557      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
558      // Multiple live-ins can alias the same register.
559      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
560        if (!hasInterval(*AS))
561          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
562                               true);
563    }
564
565    for (; MI != miEnd; ++MI) {
566      DOUT << MIIndex << "\t" << *MI;
567
568      // Handle defs.
569      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
570        MachineOperand &MO = MI->getOperand(i);
571        // handle register defs - build intervals
572        if (MO.isRegister() && MO.getReg() && MO.isDef())
573          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
574      }
575
576      MIIndex += InstrSlots::NUM;
577    }
578  }
579}
580
581bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
582                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
583  std::vector<IdxMBBPair>::const_iterator I =
584    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
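  // I now points at the first block whose start index is not less than
  // LR.start; walk forward collecting every block that starts before LR.end.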
585
586  bool ResVal = false;
587  while (I != Idx2MBBMap.end()) {
588    if (LR.end <= I->first)
589      break;
590    MBBs.push_back(I->second);
591    ResVal = true;
592    ++I;
593  }
594  return ResVal;
595}
596
597
598LiveInterval LiveIntervals::createInterval(unsigned reg) {
599  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
600                       HUGE_VALF : 0.0F;
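  // Physical register intervals get an infinite weight so they are never
  // chosen for spilling.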
601  return LiveInterval(reg, Weight);
602}
603
604
605//===----------------------------------------------------------------------===//
606// Register allocator hooks.
607//
608
609/// isReMaterializable - Returns true if the definition MI of the specified
610/// val# of the specified interval is re-materializable.
611bool LiveIntervals::isReMaterializable(const LiveInterval &li,
612                                       const VNInfo *ValNo, MachineInstr *MI,
613                                       bool &isLoad) {
614  if (DisableReMat)
615    return false;
616
617  isLoad = false;
618  const TargetInstrDesc &TID = MI->getDesc();
619  if (TID.isImplicitDef() || tii_->isTriviallyReMaterializable(MI)) {
620    isLoad = TID.isSimpleLoad();
621    return true;
622  }
623
624  int FrameIdx = 0;
625  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
626      !mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
627    return false;
628
629  // This is a load from a fixed stack slot. It can be rematerialized unless it's
630  // re-defined by a two-address instruction.
631  isLoad = true;
632  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
633       i != e; ++i) {
634    const VNInfo *VNI = *i;
635    if (VNI == ValNo)
636      continue;
637    unsigned DefIdx = VNI->def;
638    if (DefIdx == ~1U)
639      continue; // Dead val#.
640    MachineInstr *DefMI = (DefIdx == ~0u)
641      ? NULL : getInstructionFromIndex(DefIdx);
642    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
643      isLoad = false;
644      return false;
645    }
646  }
647  return true;
648}
649
650/// isReMaterializable - Returns true if the defining MI of every val# of
651/// the specified interval is re-materializable.
652bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
653  isLoad = false;
654  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
655       i != e; ++i) {
656    const VNInfo *VNI = *i;
657    unsigned DefIdx = VNI->def;
658    if (DefIdx == ~1U)
659      continue; // Dead val#.
660    // Is the def for the val# rematerializable?
661    if (DefIdx == ~0u)
662      return false;
663    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
664    bool DefIsLoad = false;
665    if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
666      return false;
667    isLoad |= DefIsLoad;
668  }
669  return true;
670}
671
672/// tryFoldMemoryOperand - Attempts to fold either a spill / restore (store
673/// to / load from a stack slot) or a rematerialized load into the given
674/// operands of the specified MI. If successful, MI is replaced with the
675/// newly created instruction and true is returned.
676bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
677                                         VirtRegMap &vrm, MachineInstr *DefMI,
678                                         unsigned InstrIdx,
679                                         SmallVector<unsigned, 2> &Ops,
680                                         bool isSS, int Slot, unsigned Reg) {
681  unsigned MRInfo = 0;
682  const TargetInstrDesc &TID = MI->getDesc();
683  // If it is an implicit def instruction, just delete it.
684  if (TID.isImplicitDef()) {
685    RemoveMachineInstrFromMaps(MI);
686    vrm.RemoveMachineInstrFromMaps(MI);
687    MI->eraseFromParent();
688    ++numFolds;
689    return true;
690  }
691
692  SmallVector<unsigned, 2> FoldOps;
693  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
694    unsigned OpIdx = Ops[i];
695    // FIXME: fold subreg use.
696    if (MI->getOperand(OpIdx).getSubReg())
697      return false;
698    if (MI->getOperand(OpIdx).isDef())
699      MRInfo |= (unsigned)VirtRegMap::isMod;
700    else {
701      // Filter out two-address use operand(s).
702      if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
703        MRInfo = VirtRegMap::isModRef;
704        continue;
705      }
706      MRInfo |= (unsigned)VirtRegMap::isRef;
707    }
708    FoldOps.push_back(OpIdx);
709  }
710
711  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
712                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
713  if (fmi) {
714    // Attempt to fold the memory reference into the instruction. If
715    // we can do this, we don't need to insert spill code.
716    if (lv_)
717      lv_->instructionChanged(MI, fmi);
718    else
719      fmi->copyKillDeadInfo(MI, tri_);
720    MachineBasicBlock &MBB = *MI->getParent();
721    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
722      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
723    vrm.transferSpillPts(MI, fmi);
724    vrm.transferRestorePts(MI, fmi);
725    mi2iMap_.erase(MI);
726    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
727    mi2iMap_[fmi] = InstrIdx;
728    MI = MBB.insert(MBB.erase(MI), fmi);
729    ++numFolds;
730    return true;
731  }
732  return false;
733}
734
735/// canFoldMemoryOperand - Returns true if the specified load / store
736/// folding is possible.
737bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
738                                         SmallVector<unsigned, 2> &Ops) const {
739  SmallVector<unsigned, 2> FoldOps;
740  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
741    unsigned OpIdx = Ops[i];
742    // FIXME: fold subreg use.
743    if (MI->getOperand(OpIdx).getSubReg())
744      return false;
745    FoldOps.push_back(OpIdx);
746  }
747
748  return tii_->canFoldMemoryOperand(MI, FoldOps);
749}
750
751bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
752  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
753  for (LiveInterval::Ranges::const_iterator
754         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
755    std::vector<IdxMBBPair>::const_iterator II =
756      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
757    if (II == Idx2MBBMap.end())
758      continue;
759    if (I->end > II->first)  // crossing a MBB.
760      return false;
761    MBBs.insert(II->second);
762    if (MBBs.size() > 1)
763      return false;
764  }
765  return true;
766}
767
768/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
769/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
770bool LiveIntervals::
771rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
772                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
773                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
774                 unsigned Slot, int LdSlot,
775                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
776                 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
777                 const TargetRegisterClass* rc,
778                 SmallVector<int, 4> &ReMatIds,
779                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
780                 const MachineLoopInfo *loopInfo,
781                 std::map<unsigned,unsigned> &MBBVRegsMap,
782                 std::vector<LiveInterval*> &NewLIs) {
783  bool CanFold = false;
784 RestartInstruction:
785  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
786    MachineOperand& mop = MI->getOperand(i);
787    if (!mop.isRegister())
788      continue;
789    unsigned Reg = mop.getReg();
790    unsigned RegI = Reg;
791    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
792      continue;
793    if (Reg != li.reg)
794      continue;
795
796    bool TryFold = !DefIsReMat;
797    bool FoldSS = true; // Default behavior unless it's a remat.
798    int FoldSlot = Slot;
799    if (DefIsReMat) {
800      // If this is the rematerializable definition MI itself and
801      // all of its uses are rematerialized, simply delete it.
802      if (MI == ReMatOrigDefMI && CanDelete) {
803        DOUT << "\t\t\t\tErasing re-materializable def: ";
804        DOUT << MI << '\n';
805        RemoveMachineInstrFromMaps(MI);
806        vrm.RemoveMachineInstrFromMaps(MI);
807        MI->eraseFromParent();
808        break;
809      }
810
811      // If def for this use can't be rematerialized, then try folding.
812      // If def is rematerializable and it's a load, also try folding.
813      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
814      if (isLoad) {
815        // Try fold loads (from stack slot, constant pool, etc.) into uses.
816        FoldSS = isLoadSS;
817        FoldSlot = LdSlot;
818      }
819    }
820
821    // Scan all of the operands of this instruction rewriting operands
822    // to use NewVReg instead of li.reg as appropriate.  We do this for
823    // two reasons:
824    //
825    //   1. If the instr reads the same spilled vreg multiple times, we
826    //      want to reuse the NewVReg.
827    //   2. If the instr is a two-addr instruction, we are required to
828    //      keep the src/dst regs pinned.
829    //
830    // Keep track of whether we replace a use and/or def so that we can
831    // create the spill interval with the appropriate range.
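    // For example, in "vreg = vreg op vreg" every occurrence of vreg in the
    // instruction is rewritten to the same NewVReg in the pass below.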
832
833    HasUse = mop.isUse();
834    HasDef = mop.isDef();
835    SmallVector<unsigned, 2> Ops;
836    Ops.push_back(i);
837    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
838      const MachineOperand &MOj = MI->getOperand(j);
839      if (!MOj.isRegister())
840        continue;
841      unsigned RegJ = MOj.getReg();
842      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
843        continue;
844      if (RegJ == RegI) {
845        Ops.push_back(j);
846        HasUse |= MOj.isUse();
847        HasDef |= MOj.isDef();
848      }
849    }
850
851    if (TryFold) {
852      // Do not fold load / store here if we are splitting. We'll find an
853      // optimal point to insert a load / store later.
854      if (!TrySplit) {
855        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
856                                 Ops, FoldSS, FoldSlot, Reg)) {
857          // Folding the load/store can completely change the instruction in
858          // unpredictable ways; rescan it from the beginning.
859          HasUse = false;
860          HasDef = false;
861          CanFold = false;
862          goto RestartInstruction;
863        }
864      } else {
865        CanFold = canFoldMemoryOperand(MI, Ops);
866      }
867    } else
868      CanFold = false;
869
870    // Create a new virtual register for the spill interval.
871    bool CreatedNewVReg = false;
872    if (NewVReg == 0) {
873      NewVReg = RegInfo.createVirtualRegister(rc);
874      vrm.grow();
875      CreatedNewVReg = true;
876    }
877    mop.setReg(NewVReg);
878
879    // Reuse NewVReg for other reads.
880    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
881      MI->getOperand(Ops[j]).setReg(NewVReg);
882
883    if (CreatedNewVReg) {
884      if (DefIsReMat) {
885        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
886        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
887          // Each valnum may have its own remat id.
888          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
889        } else {
890          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
891        }
892        if (!CanDelete || (HasUse && HasDef)) {
893          // If this is a two-addr instruction then its use operands are
894          // rematerializable but its def is not. It should be assigned a
895          // stack slot.
896          vrm.assignVirt2StackSlot(NewVReg, Slot);
897        }
898      } else {
899        vrm.assignVirt2StackSlot(NewVReg, Slot);
900      }
901    } else if (HasUse && HasDef &&
902               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
903      // If this interval hasn't been assigned a stack slot (because earlier
904      // def is a deleted remat def), do it now.
905      assert(Slot != VirtRegMap::NO_STACK_SLOT);
906      vrm.assignVirt2StackSlot(NewVReg, Slot);
907    }
908
909    // create a new register interval for this spill / remat.
910    LiveInterval &nI = getOrCreateInterval(NewVReg);
911    if (CreatedNewVReg) {
912      NewLIs.push_back(&nI);
913      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
914      if (TrySplit)
915        vrm.setIsSplitFromReg(NewVReg, li.reg);
916    }
917
918    if (HasUse) {
919      if (CreatedNewVReg) {
920        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
921                     nI.getNextValue(~0U, 0, VNInfoAllocator));
922        DOUT << " +" << LR;
923        nI.addRange(LR);
924      } else {
925        // Extend the split live interval to this def / use.
926        unsigned End = getUseIndex(index)+1;
927        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
928                     nI.getValNumInfo(nI.getNumValNums()-1));
929        DOUT << " +" << LR;
930        nI.addRange(LR);
931      }
932    }
933    if (HasDef) {
934      LiveRange LR(getDefIndex(index), getStoreIndex(index),
935                   nI.getNextValue(~0U, 0, VNInfoAllocator));
936      DOUT << " +" << LR;
937      nI.addRange(LR);
938    }
939
940    DOUT << "\t\t\t\tAdded new interval: ";
941    nI.print(DOUT, tri_);
942    DOUT << '\n';
943  }
944  return CanFold;
945}
946bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
947                                   const VNInfo *VNI,
948                                   MachineBasicBlock *MBB, unsigned Idx) const {
949  unsigned End = getMBBEndIdx(MBB);
950  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
951    unsigned KillIdx = VNI->kills[j];
952    if (KillIdx > Idx && KillIdx < End)
953      return true;
954  }
955  return false;
956}
957
958static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
959  const VNInfo *VNI = NULL;
960  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
961         e = li.vni_end(); i != e; ++i)
962    if ((*i)->def == DefIdx) {
963      VNI = *i;
964      break;
965    }
966  return VNI;
967}
968
969void LiveIntervals::
970rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
971                    LiveInterval::Ranges::const_iterator &I,
972                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
973                    unsigned Slot, int LdSlot,
974                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
975                    VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
976                    const TargetRegisterClass* rc,
977                    SmallVector<int, 4> &ReMatIds,
978                    const MachineLoopInfo *loopInfo,
979                    BitVector &SpillMBBs,
980                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
981                    BitVector &RestoreMBBs,
982                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
983                    std::map<unsigned,unsigned> &MBBVRegsMap,
984                    std::vector<LiveInterval*> &NewLIs) {
985  bool AllCanFold = true;
986  unsigned NewVReg = 0;
987  unsigned index = getBaseIndex(I->start);
988  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
989  for (; index != end; index += InstrSlots::NUM) {
990    // skip deleted instructions
991    while (index != end && !getInstructionFromIndex(index))
992      index += InstrSlots::NUM;
993    if (index == end) break;
994
995    MachineInstr *MI = getInstructionFromIndex(index);
996    MachineBasicBlock *MBB = MI->getParent();
997    unsigned ThisVReg = 0;
998    if (TrySplit) {
999      std::map<unsigned,unsigned>::const_iterator NVI =
1000        MBBVRegsMap.find(MBB->getNumber());
1001      if (NVI != MBBVRegsMap.end()) {
1002        ThisVReg = NVI->second;
1003        // One common case:
1004        // x = use
1005        // ...
1006        // ...
1007        // def = ...
1008        //     = use
1009        // It's better to start a new interval to avoid artificially
1010        // extending the new interval.
1011        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
1012        bool MIHasUse = false;
1013        bool MIHasDef = false;
1014        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1015          MachineOperand& mop = MI->getOperand(i);
1016          if (!mop.isRegister() || mop.getReg() != li.reg)
1017            continue;
1018          if (mop.isUse())
1019            MIHasUse = true;
1020          else
1021            MIHasDef = true;
1022        }
1023        if (MIHasDef && !MIHasUse) {
1024          MBBVRegsMap.erase(MBB->getNumber());
1025          ThisVReg = 0;
1026        }
1027      }
1028    }
1029
1030    bool IsNew = ThisVReg == 0;
1031    if (IsNew) {
1032      // This ends the previous live interval. If all of its def / use
1033      // can be folded, give it a low spill weight.
1034      if (NewVReg && TrySplit && AllCanFold) {
1035        LiveInterval &nI = getOrCreateInterval(NewVReg);
1036        nI.weight /= 10.0F;
1037      }
1038      AllCanFold = true;
1039    }
1040    NewVReg = ThisVReg;
1041
1042    bool HasDef = false;
1043    bool HasUse = false;
1044    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
1045                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1046                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1047                                CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
1048                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
1049    if (!HasDef && !HasUse)
1050      continue;
1051
1052    AllCanFold &= CanFold;
1053
1054    // Update weight of spill interval.
1055    LiveInterval &nI = getOrCreateInterval(NewVReg);
1056    if (!TrySplit) {
1057      // The spill weight is now infinity as it cannot be spilled again.
1058      nI.weight = HUGE_VALF;
1059      continue;
1060    }
1061
1062    // Keep track of the last def and first use in each MBB.
1063    unsigned MBBId = MBB->getNumber();
1064    if (HasDef) {
1065      if (MI != ReMatOrigDefMI || !CanDelete) {
1066        bool HasKill = false;
1067        if (!HasUse)
1068          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1069        else {
1070          // If this is a two-address instruction, then this index starts a new VNInfo.
1071          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1072          if (VNI)
1073            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1074        }
1075        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1076          SpillIdxes.find(MBBId);
1077        if (!HasKill) {
1078          if (SII == SpillIdxes.end()) {
1079            std::vector<SRInfo> S;
1080            S.push_back(SRInfo(index, NewVReg, true));
1081            SpillIdxes.insert(std::make_pair(MBBId, S));
1082          } else if (SII->second.back().vreg != NewVReg) {
1083            SII->second.push_back(SRInfo(index, NewVReg, true));
1084          } else if ((int)index > SII->second.back().index) {
1085            // If there is an earlier def and this is a two-address
1086            // instruction, then it's not possible to fold the store (which
1087            // would also fold the load).
1088            SRInfo &Info = SII->second.back();
1089            Info.index = index;
1090            Info.canFold = !HasUse;
1091          }
1092          SpillMBBs.set(MBBId);
1093        } else if (SII != SpillIdxes.end() &&
1094                   SII->second.back().vreg == NewVReg &&
1095                   (int)index > SII->second.back().index) {
1096          // There is an earlier def that's not killed (must be two-address).
1097          // The spill is no longer needed.
1098          SII->second.pop_back();
1099          if (SII->second.empty()) {
1100            SpillIdxes.erase(MBBId);
1101            SpillMBBs.reset(MBBId);
1102          }
1103        }
1104      }
1105    }
1106
1107    if (HasUse) {
1108      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1109        SpillIdxes.find(MBBId);
1110      if (SII != SpillIdxes.end() &&
1111          SII->second.back().vreg == NewVReg &&
1112          (int)index > SII->second.back().index)
1113        // There are use(s) following the last def; it's not safe to fold the spill.
1114        SII->second.back().canFold = false;
1115      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1116        RestoreIdxes.find(MBBId);
1117      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1118        // If we are splitting live intervals, only fold if it's the first
1119        // use and there isn't another use later in the MBB.
1120        RII->second.back().canFold = false;
1121      else if (IsNew) {
1122        // Only need a reload if there isn't an earlier def / use.
1123        if (RII == RestoreIdxes.end()) {
1124          std::vector<SRInfo> Infos;
1125          Infos.push_back(SRInfo(index, NewVReg, true));
1126          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1127        } else {
1128          RII->second.push_back(SRInfo(index, NewVReg, true));
1129        }
1130        RestoreMBBs.set(MBBId);
1131      }
1132    }
1133
1134    // Update spill weight.
1135    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1136    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
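    // getSpillWeight presumably scales the contribution by loop depth, so
    // defs and uses inside hot loops raise the interval's spill cost more.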
1137  }
1138
1139  if (NewVReg && TrySplit && AllCanFold) {
1140    // If all of its def / use can be folded, give it a low spill weight.
1141    LiveInterval &nI = getOrCreateInterval(NewVReg);
1142    nI.weight /= 10.0F;
1143  }
1144}
1145
1146bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1147                        BitVector &RestoreMBBs,
1148                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1149  if (!RestoreMBBs[Id])
1150    return false;
1151  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1152  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1153    if (Restores[i].index == index &&
1154        Restores[i].vreg == vr &&
1155        Restores[i].canFold)
1156      return true;
1157  return false;
1158}
1159
1160void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1161                        BitVector &RestoreMBBs,
1162                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1163  if (!RestoreMBBs[Id])
1164    return;
1165  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1166  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1167    if (Restores[i].index == index && Restores[i].vreg)
1168      Restores[i].index = -1;
1169}
1170
1171
1172std::vector<LiveInterval*> LiveIntervals::
1173addIntervalsForSpills(const LiveInterval &li,
1174                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1175  // Since this is called after the analysis is done, we don't know if
1176  // LiveVariables is available.
1177  lv_ = getAnalysisToUpdate<LiveVariables>();
1178
1179  assert(li.weight != HUGE_VALF &&
1180         "attempt to spill already spilled interval!");
1181
1182  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1183  li.print(DOUT, tri_);
1184  DOUT << '\n';
1185
1186  // Each bit specifies whether a spill is required in the corresponding MBB.
1187  BitVector SpillMBBs(mf_->getNumBlockIDs());
1188  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1189  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1190  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1191  std::map<unsigned,unsigned> MBBVRegsMap;
1192  std::vector<LiveInterval*> NewLIs;
1193  MachineRegisterInfo &RegInfo = mf_->getRegInfo();
1194  const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
1195
1196  unsigned NumValNums = li.getNumValNums();
1197  SmallVector<MachineInstr*, 4> ReMatDefs;
1198  ReMatDefs.resize(NumValNums, NULL);
1199  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1200  ReMatOrigDefs.resize(NumValNums, NULL);
1201  SmallVector<int, 4> ReMatIds;
1202  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
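  // MAX_STACK_SLOT doubles as the "no remat id assigned yet" marker; real ids
  // are assigned lazily in rewriteInstructionForSpills.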
1203  BitVector ReMatDelete(NumValNums);
1204  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1205
1206  // Spilling a split live interval. It cannot be split any further. It is
1207  // also guaranteed to be a single val# / range interval.
1208  if (vrm.getPreSplitReg(li.reg)) {
1209    vrm.setIsSplitFromReg(li.reg, 0);
1210    // Unset the split kill marker on the last use.
1211    unsigned KillIdx = vrm.getKillPoint(li.reg);
1212    if (KillIdx) {
1213      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1214      assert(KillMI && "Last use disappeared?");
1215      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1216      assert(KillOp != -1 && "Last use disappeared?");
1217      KillMI->getOperand(KillOp).setIsKill(false);
1218    }
1219    vrm.removeKillPoint(li.reg);
1220    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1221    Slot = vrm.getStackSlot(li.reg);
1222    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1223    MachineInstr *ReMatDefMI = DefIsReMat ?
1224      vrm.getReMaterializedMI(li.reg) : NULL;
1225    int LdSlot = 0;
1226    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1227    bool isLoad = isLoadSS ||
1228      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1229    bool IsFirstRange = true;
1230    for (LiveInterval::Ranges::const_iterator
1231           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1232      // If this is a split live interval with multiple ranges, it means there
1233      // are two-address instructions that re-defined the value. Only the
1234      // first def can be rematerialized!
1235      if (IsFirstRange) {
1236        // Note ReMatOrigDefMI has already been deleted.
1237        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1238                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1239                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1240                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1241                             MBBVRegsMap, NewLIs);
1242      } else {
1243        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1244                             Slot, 0, false, false, false,
1245                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1246                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1247                             MBBVRegsMap, NewLIs);
1248      }
1249      IsFirstRange = false;
1250    }
1251    return NewLIs;
1252  }
1253
1254  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1255  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1256    TrySplit = false;
1257  if (TrySplit)
1258    ++numSplits;
1259  bool NeedStackSlot = false;
1260  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1261       i != e; ++i) {
1262    const VNInfo *VNI = *i;
1263    unsigned VN = VNI->id;
1264    unsigned DefIdx = VNI->def;
1265    if (DefIdx == ~1U)
1266      continue; // Dead val#.
1267    // Is the def for the val# rematerializable?
1268    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1269      ? 0 : getInstructionFromIndex(DefIdx);
1270    bool dummy;
1271    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1272      // Remember how to remat the def of this val#.
1273      ReMatOrigDefs[VN] = ReMatDefMI;
1274      // Original def may be modified so we have to make a copy here. vrm must
1275      // delete these!
1276      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1277
1278      bool CanDelete = true;
1279      if (VNI->hasPHIKill) {
1280        // A kill is a phi node, so not all of its uses can be
1281        // rematerialized. It must not be deleted.
1282        CanDelete = false;
1283        // Need a stack slot if there is any live range where uses cannot be
1284        // rematerialized.
1285        NeedStackSlot = true;
1286      }
1287      if (CanDelete)
1288        ReMatDelete.set(VN);
1289    } else {
1290      // Need a stack slot if there is any live range where uses cannot be
1291      // rematerialized.
1292      NeedStackSlot = true;
1293    }
1294  }
1295
1296  // One stack slot per live interval.
1297  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1298    Slot = vrm.assignVirt2StackSlot(li.reg);
1299
1300  // Create new intervals and rewrite defs and uses.
1301  for (LiveInterval::Ranges::const_iterator
1302         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1303    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1304    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1305    bool DefIsReMat = ReMatDefMI != NULL;
1306    bool CanDelete = ReMatDelete[I->valno->id];
1307    int LdSlot = 0;
1308    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1309    bool isLoad = isLoadSS ||
1310      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1311    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1312                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1313                               CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
1314                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1315                               MBBVRegsMap, NewLIs);
1316  }
1317
1318  // Insert spills / restores if we are splitting.
1319  if (!TrySplit)
1320    return NewLIs;
1321
1322  SmallPtrSet<LiveInterval*, 4> AddedKill;
1323  SmallVector<unsigned, 2> Ops;
1324  if (NeedStackSlot) {
1325    int Id = SpillMBBs.find_first();
1326    while (Id != -1) {
1327      std::vector<SRInfo> &spills = SpillIdxes[Id];
1328      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1329        int index = spills[i].index;
1330        unsigned VReg = spills[i].vreg;
1331        LiveInterval &nI = getOrCreateInterval(VReg);
1332        bool isReMat = vrm.isReMaterialized(VReg);
1333        MachineInstr *MI = getInstructionFromIndex(index);
1334        bool CanFold = false;
1335        bool FoundUse = false;
1336        Ops.clear();
1337        if (spills[i].canFold) {
1338          CanFold = true;
1339          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1340            MachineOperand &MO = MI->getOperand(j);
1341            if (!MO.isRegister() || MO.getReg() != VReg)
1342              continue;
1343
1344            Ops.push_back(j);
1345            if (MO.isDef())
1346              continue;
1347            if (isReMat ||
1348                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1349                                                RestoreMBBs, RestoreIdxes))) {
1350              // MI has two-address uses of the same register. If the use
1351              // isn't the first and only use in the BB, then we can't fold
1352              // it. FIXME: Move this to rewriteInstructionsForSpills.
1353              CanFold = false;
1354              break;
1355            }
1356            FoundUse = true;
1357          }
1358        }
1359        // Fold the store into the def if possible.
1360        bool Folded = false;
1361        if (CanFold && !Ops.empty()) {
1362          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1363            Folded = true;
1364            if (FoundUse) {
1365              // Also folded uses, do not issue a load.
1366              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1367              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1368            }
1369            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1370          }
1371        }
1372
1373        // Else tell the spiller to issue a spill.
1374        if (!Folded) {
1375          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1376          bool isKill = LR->end == getStoreIndex(index);
1377          vrm.addSpillPoint(VReg, isKill, MI);
1378          if (isKill)
1379            AddedKill.insert(&nI);
1380        }
1381      }
1382      Id = SpillMBBs.find_next(Id);
1383    }
1384  }
1385
1386  int Id = RestoreMBBs.find_first();
1387  while (Id != -1) {
1388    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1389    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1390      int index = restores[i].index;
1391      if (index == -1)
1392        continue;
1393      unsigned VReg = restores[i].vreg;
1394      LiveInterval &nI = getOrCreateInterval(VReg);
1395      MachineInstr *MI = getInstructionFromIndex(index);
1396      bool CanFold = false;
1397      Ops.clear();
1398      if (restores[i].canFold) {
1399        CanFold = true;
1400        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1401          MachineOperand &MO = MI->getOperand(j);
1402          if (!MO.isRegister() || MO.getReg() != VReg)
1403            continue;
1404
1405          if (MO.isDef()) {
1406            // If this restore were to be folded, it would have been folded
1407            // already.
1408            CanFold = false;
1409            break;
1410          }
1411          Ops.push_back(j);
1412        }
1413      }
1414
1415      // Fold the load into the use if possible.
1416      bool Folded = false;
1417      if (CanFold && !Ops.empty()) {
1418        if (!vrm.isReMaterialized(VReg))
1419          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1420        else {
1421          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1422          int LdSlot = 0;
1423          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1424          // If the rematerializable def is a load, also try to fold it.
1425          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1426            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1427                                          Ops, isLoadSS, LdSlot, VReg);
1428        }
1429      }
1430      // If folding is not possible / failed, then tell the spiller to issue a
1431      // load / rematerialization for us.
1432      if (Folded)
1433        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1434      else
1435        vrm.addRestorePoint(VReg, MI);
1436    }
1437    Id = RestoreMBBs.find_next(Id);
1438  }
1439
1440  // Finalize intervals: add kills, finalize spill weights, and filter out
1441  // dead intervals.
1442  std::vector<LiveInterval*> RetNewLIs;
1443  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1444    LiveInterval *LI = NewLIs[i];
1445    if (!LI->empty()) {
1446      LI->weight /= LI->getSize();
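      // Normalize the accumulated weight by the interval's length so the
      // weight reflects use density rather than raw use count.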
1447      if (!AddedKill.count(LI)) {
1448        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1449        unsigned LastUseIdx = getBaseIndex(LR->end);
1450        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1451        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1452        assert(UseIdx != -1);
1453        if (LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) ==
1454            -1) {
1455          LastUse->getOperand(UseIdx).setIsKill();
1456          vrm.addKillPoint(LI->reg, LastUseIdx);
1457        }
1458      }
1459      RetNewLIs.push_back(LI);
1460    }
1461  }
1462
1463  return RetNewLIs;
1464}
1465