LiveIntervalAnalysis.cpp revision 5a18c02adf710ec282a3b8d4eebc30ac85df53ca
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

62
63void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64  AU.setPreservesCFG();
65  AU.addRequired<AliasAnalysis>();
66  AU.addPreserved<AliasAnalysis>();
67  AU.addPreserved<LiveVariables>();
68  AU.addRequired<LiveVariables>();
69  AU.addPreservedID(MachineLoopInfoID);
70  AU.addPreservedID(MachineDominatorsID);
71
72  if (!StrongPHIElim) {
73    AU.addPreservedID(PHIEliminationID);
74    AU.addRequiredID(PHIEliminationID);
75  }
76
77  AU.addRequiredID(TwoAddressInstructionPassID);
78  AU.addPreserved<ProcessImplicitDefs>();
79  AU.addRequired<ProcessImplicitDefs>();
80  AU.addPreserved<SlotIndexes>();
81  AU.addRequiredTransitive<SlotIndexes>();
82  MachineFunctionPass::getAnalysisUsage(AU);
83}
84
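// Example (a sketch, not code from this file): a pass that consumes this
// analysis would declare the dependency and query intervals like so:
//
//   void MyClientPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();  // MyClientPass is hypothetical
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//   ...
//   LiveIntervals &LIs = getAnalysis<LiveIntervals>();
//   if (LIs.hasInterval(Reg))
//     const LiveInterval &LI = LIs.getInterval(Reg);
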
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

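// A note on the slot arithmetic used below (a sketch; SlotIndexes.h is the
// authoritative reference): every instruction owns a small block of slots
// (load, use, def, store), so an expression such as
//   range.start.getBaseIndex()
// rounds an index down to its instruction's first slot, and
//   range.end.getPrevSlot().getBaseIndex().getNextIndex()
// computes the first base index past the last instruction that the range
// overlaps.
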
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references, and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

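// Callers (e.g. the register coalescer) pass in the set of copy instructions
// they have already joined, so those copies are not reported as conflicts.
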
#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number.  Note that this value
      // number isn't actually defined by an instruction; it is live-in via
      // a phi, so it has no accurate def.
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only be redefined once.  This means that
      // at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value #0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

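// Worked example for the two-address case above (hypothetical registers and
// indices, for illustration only): given
//
//   4   %reg1024 = MOV %reg1025
//   8   %reg1024 = ADD %reg1024, 1     ; two-address redefinition
//
// the range [4,8) defined by the copy is removed and re-added under a new
// value number (#1), while value #0 is rewired to be defined by the
// redefining instruction at index 8.
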
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should reach this point with a dead physreg,
  // without a kill or another instruction at which we know it is dead, is
  // when the register is live-in to the function and never used. Another
  // possibility is that the implicit use of the physical register was
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend the old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

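// For instance (an illustrative X86 case, not from this function): a def of
// %EAX also defines %AX, %AH and %AL, so the loop above creates or extends
// an interval for each sub-register that the instruction does not already
// redefine explicitly.
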
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUEs at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUEs.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute live intervals for the virtual registers.
/// For some ordering of the machine instructions [1,N], a live interval is
/// an interval [i, j), where 1 <= i <= j < N, over which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

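// Illustrative case for the loop above (hypothetical vreg numbers): given
//   %reg1024<def> = IMPLICIT_DEF
//   ...           = use %reg1024<undef>
// the undef use contributes no real live range, so an empty interval is
// created for %reg1024 here.
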
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
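
// Usage sketch (hypothetical caller): the duplicate is heap-allocated, so
// the caller must free it when done.
//   LiveInterval *Tmp = dupInterval(&getInterval(Reg));
//   ... inspect or modify Tmp ...
//   delete Tmp;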

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

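// A typical instance of the implicit-use exception above (illustrative X86
// PIC case; the exact operands are hypothetical):
//   %reg1030 = MOV32rm %reg1029, 1, %reg0, <cp#0>, %reg0
// Such a constant-pool load can be marked trivially rematerializable even
// though it reads %reg1029, the PIC base register, which is expected to be
// live everywhere.
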
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the defining MI of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

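// After a successful (false-returning) FilterFoldedOps call, MRInfo is a
// VirtRegMap::ModRef bitmask describing the filtered operands, e.g.:
//   MRInfo == VirtRegMap::isRef     - the register is only read
//   MRInfo == VirtRegMap::isMod     - the register is only written
//   MRInfo == VirtRegMap::isModRef  - the register is both read and written
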
/// tryFoldMemoryOperand - Attempt to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If successful, MI is updated with the newly created MI
/// and the function returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of the
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register, so that when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways; rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

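/// anyKillInMBBAfterIdx - Return true if the given val# has a kill within
/// MBB after Idx, ignoring PHI kill slots.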
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB,
                                   SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent the
      // interval from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

1390    bool IsNew = ThisVReg == 0;
1391    if (IsNew) {
1392      // This ends the previous live interval. If all of its def / use
1393      // can be folded, give it a low spill weight.
1394      if (NewVReg && TrySplit && AllCanFold) {
1395        LiveInterval &nI = getOrCreateInterval(NewVReg);
1396        nI.weight /= 10.0F;
1397      }
1398      AllCanFold = true;
1399    }
1400    NewVReg = ThisVReg;
1401
1402    bool HasDef = false;
1403    bool HasUse = false;
1404    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1405                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1406                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1407                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1408                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1409    if (!HasDef && !HasUse)
1410      continue;
1411
1412    AllCanFold &= CanFold;
1413
1414    // Update weight of spill interval.
1415    LiveInterval &nI = getOrCreateInterval(NewVReg);
1416    if (!TrySplit) {
1417      // The spill weight is now infinity as it cannot be spilled again.
1418      nI.markNotSpillable();
1419      continue;
1420    }
1421
1422    // Keep track of the last def and first use in each MBB.
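    // Editor's note (sketch): SpillIdxes records, per MBB, the last def of
    // each new vreg (a candidate spill point), while RestoreIdxes records
    // the first use with no earlier def / use (a candidate restore point).
    // Later defs replace the recorded spill point; later uses invalidate
    // folding.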
1423    if (HasDef) {
1424      if (MI != ReMatOrigDefMI || !CanDelete) {
1425        bool HasKill = false;
1426        if (!HasUse)
1427          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1428        else {
1429          // If this is two-address code, then this index starts a new VNInfo.
1430          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1431          if (VNI)
1432            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1433        }
1434        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1435          SpillIdxes.find(MBBId);
1436        if (!HasKill) {
1437          if (SII == SpillIdxes.end()) {
1438            std::vector<SRInfo> S;
1439            S.push_back(SRInfo(index, NewVReg, true));
1440            SpillIdxes.insert(std::make_pair(MBBId, S));
1441          } else if (SII->second.back().vreg != NewVReg) {
1442            SII->second.push_back(SRInfo(index, NewVReg, true));
1443          } else if (index > SII->second.back().index) {
1444            // If there is an earlier def and this is a two-address
1445            // instruction, then it's not possible to fold the store (which
1446            // would also fold the load).
1447            SRInfo &Info = SII->second.back();
1448            Info.index = index;
1449            Info.canFold = !HasUse;
1450          }
1451          SpillMBBs.set(MBBId);
1452        } else if (SII != SpillIdxes.end() &&
1453                   SII->second.back().vreg == NewVReg &&
1454                   index > SII->second.back().index) {
1455          // There is an earlier def that's not killed (must be two-address).
1456          // The spill is no longer needed.
1457          SII->second.pop_back();
1458          if (SII->second.empty()) {
1459            SpillIdxes.erase(MBBId);
1460            SpillMBBs.reset(MBBId);
1461          }
1462        }
1463      }
1464    }
1465
1466    if (HasUse) {
1467      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1468        SpillIdxes.find(MBBId);
1469      if (SII != SpillIdxes.end() &&
1470          SII->second.back().vreg == NewVReg &&
1471          index > SII->second.back().index)
1472        // There are use(s) following the last def; it's not safe to fold the spill.
1473        SII->second.back().canFold = false;
1474      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1475        RestoreIdxes.find(MBBId);
1476      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1477        // If we are splitting live intervals, only fold if it's the first
1478        // use and there isn't another use later in the MBB.
1479        RII->second.back().canFold = false;
1480      else if (IsNew) {
1481        // Only need a reload if there isn't an earlier def / use.
1482        if (RII == RestoreIdxes.end()) {
1483          std::vector<SRInfo> Infos;
1484          Infos.push_back(SRInfo(index, NewVReg, true));
1485          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1486        } else {
1487          RII->second.push_back(SRInfo(index, NewVReg, true));
1488        }
1489        RestoreMBBs.set(MBBId);
1490      }
1491    }
1492
1493    // Update spill weight.
1494    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1495    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1496  }
1497
1498  if (NewVReg && TrySplit && AllCanFold) {
1499    // If all of its defs / uses can be folded, give it a low spill weight.
1500    LiveInterval &nI = getOrCreateInterval(NewVReg);
1501    nI.weight /= 10.0F;
1502  }
1503}
1504
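/// alsoFoldARestore - (editor's note) Return true if the restore recorded
/// at (index, vr) in block Id is still a candidate for folding.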
1505bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1506                        unsigned vr, BitVector &RestoreMBBs,
1507                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1508  if (!RestoreMBBs[Id])
1509    return false;
1510  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1511  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1512    if (Restores[i].index == index &&
1513        Restores[i].vreg == vr &&
1514        Restores[i].canFold)
1515      return true;
1516  return false;
1517}
1518
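/// eraseRestoreInfo - (editor's note) Invalidate the restore recorded at
/// (index, vr) in block Id; a folded use has made the reload unnecessary.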
1519void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1520                        unsigned vr, BitVector &RestoreMBBs,
1521                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1522  if (!RestoreMBBs[Id])
1523    return;
1524  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1525  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1526    if (Restores[i].index == index && Restores[i].vreg == vr)
1527      Restores[i].index = SlotIndex();
1528}
1529
1530/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1531/// spilled and create empty intervals for their uses.
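/// For example (editor's sketch, hypothetical vregs):
///   %reg1024<def> = IMPLICIT_DEF      ; erased rather than spilled
///   ...           = OP %reg1024       ; use rewritten to a fresh undef vreg
///                                     ; with an empty live interval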
1532void
1533LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1534                                    const TargetRegisterClass* rc,
1535                                    std::vector<LiveInterval*> &NewLIs) {
1536  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1537         re = mri_->reg_end(); ri != re; ) {
1538    MachineOperand &O = ri.getOperand();
1539    MachineInstr *MI = &*ri;
1540    ++ri;
1541    if (MI->isDebugValue()) {
1542      // Remove debug info for now.
1543      O.setReg(0U);
1544      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1545      continue;
1546    }
1547    if (O.isDef()) {
1548      assert(MI->isImplicitDef() &&
1549             "Register def was not rewritten?");
1550      RemoveMachineInstrFromMaps(MI);
1551      vrm.RemoveMachineInstrFromMaps(MI);
1552      MI->eraseFromParent();
1553    } else {
1554      // This must be a use of an implicit_def, so it's not part of the live
1555      // interval. Create a new empty live interval for it.
1556      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1557      unsigned NewVReg = mri_->createVirtualRegister(rc);
1558      vrm.grow();
1559      vrm.setIsImplicitlyDefined(NewVReg);
1560      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1561      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1562        MachineOperand &MO = MI->getOperand(i);
1563        if (MO.isReg() && MO.getReg() == li.reg) {
1564          MO.setReg(NewVReg);
1565          MO.setIsUndef();
1566        }
1567      }
1568    }
1569  }
1570}
1571
1572float
1573LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1574  // Limit the loop depth ridiculousness.
1575  if (loopDepth > 200)
1576    loopDepth = 200;
1577
1578  // The loop depth is used to roughly estimate the number of times the
1579  // instruction is executed. Something like 10^d is simple, but will quickly
1580  // overflow a float. This expression behaves like 10^d for small d, but is
1581  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1582  // headroom before overflow.
1583  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1584
1585  return (isDef + isUse) * lc;
1586}
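// Editor's note: approximate sample values of the curve above:
//   d=0 -> 1, d=1 -> ~10, d=2 -> ~87, d=3 -> ~657, d=200 -> ~6.7e33,
// i.e. it tracks 10^d closely for small d and flattens out for large d.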
1587
1588void
1589LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1590  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1591    normalizeSpillWeight(*NewLIs[i]);
1592}
1593
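/// addIntervalsForSpillsFast - (editor's note) Spill li without splitting:
/// every instruction touching li.reg that cannot be folded gets its own
/// unspillable vreg, all sharing a single stack slot.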
1594std::vector<LiveInterval*> LiveIntervals::
1595addIntervalsForSpillsFast(const LiveInterval &li,
1596                          const MachineLoopInfo *loopInfo,
1597                          VirtRegMap &vrm) {
1598  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1599
1600  std::vector<LiveInterval*> added;
1601
1602  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1603
1604  DEBUG({
1605      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1606      li.dump();
1607      dbgs() << '\n';
1608    });
1609
1610  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1611
1612  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1613  while (RI != mri_->reg_end()) {
1614    MachineInstr* MI = &*RI;
1615
1616    SmallVector<unsigned, 2> Indices;
1617    bool HasUse = false;
1618    bool HasDef = false;
1619
1620    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1621      MachineOperand& mop = MI->getOperand(i);
1622      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1623
1624      HasUse |= mop.isUse();
1625      HasDef |= mop.isDef();
1626
1627      Indices.push_back(i);
1628    }
1629
1630    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1631                              Indices, true, slot, li.reg)) {
1632      unsigned NewVReg = mri_->createVirtualRegister(rc);
1633      vrm.grow();
1634      vrm.assignVirt2StackSlot(NewVReg, slot);
1635
1636      // Create a new live interval for this spill.
1637      LiveInterval &nI = getOrCreateInterval(NewVReg);
1638      nI.markNotSpillable();
1639
1640      // Rewrite register operands to use the new vreg.
1641      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1642           E = Indices.end(); I != E; ++I) {
1643        MI->getOperand(*I).setReg(NewVReg);
1644
1645        if (MI->getOperand(*I).isUse())
1646          MI->getOperand(*I).setIsKill(true);
1647      }
1648
1649      // Fill in the new live interval.
1650      SlotIndex index = getInstructionIndex(MI);
1651      if (HasUse) {
1652        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1653                     nI.getNextValue(SlotIndex(), 0, false,
1654                                     getVNInfoAllocator()));
1655        DEBUG(dbgs() << " +" << LR);
1656        nI.addRange(LR);
1657        vrm.addRestorePoint(NewVReg, MI);
1658      }
1659      if (HasDef) {
1660        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1661                     nI.getNextValue(SlotIndex(), 0, false,
1662                                     getVNInfoAllocator()));
1663        DEBUG(dbgs() << " +" << LR);
1664        nI.addRange(LR);
1665        vrm.addSpillPoint(NewVReg, true, MI);
1666      }
1667
1668      added.push_back(&nI);
1669
1670      DEBUG({
1671          dbgs() << "\t\t\t\tadded new interval: ";
1672          nI.dump();
1673          dbgs() << '\n';
1674        });
1675    }
1676
1677
1678    RI = mri_->reg_begin(li.reg);
1679  }
1680
1681  return added;
1682}
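// Editor's sketch of the fast path (hypothetical vregs, stack slot FI#0):
//   before:  %reg1024 = ADD %reg1024, 1
//   after:   %reg1026 = ADD %reg1026, 1   ; %reg1026 assigned to FI#0
// with a restore point recorded before the ADD and a spill point after it;
// the spiller later materializes the actual reload and store.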
1683
1684std::vector<LiveInterval*> LiveIntervals::
1685addIntervalsForSpills(const LiveInterval &li,
1686                      SmallVectorImpl<LiveInterval*> &SpillIs,
1687                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1688
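  // Editor's overview (sketch): the slow path below (1) marks which val#s
  // are rematerializable, (2) rewrites each range's defs / uses into new
  // intervals, and (3) when splitting across blocks, folds or records the
  // per-MBB spills and restores.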
1689  if (EnableFastSpilling)
1690    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1691
1692  assert(li.isSpillable() && "attempt to spill already spilled interval!");
1693
1694  DEBUG({
1695      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1696      li.print(dbgs(), tri_);
1697      dbgs() << '\n';
1698    });
1699
1700  // Each bit specifies whether a spill is required in the corresponding MBB.
1701  BitVector SpillMBBs(mf_->getNumBlockIDs());
1702  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1703  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1704  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1705  DenseMap<unsigned,unsigned> MBBVRegsMap;
1706  std::vector<LiveInterval*> NewLIs;
1707  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1708
1709  unsigned NumValNums = li.getNumValNums();
1710  SmallVector<MachineInstr*, 4> ReMatDefs;
1711  ReMatDefs.resize(NumValNums, NULL);
1712  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1713  ReMatOrigDefs.resize(NumValNums, NULL);
1714  SmallVector<int, 4> ReMatIds;
1715  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1716  BitVector ReMatDelete(NumValNums);
1717  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1718
1719  // Spilling a split live interval. It cannot be split any further. It is
1720  // also guaranteed to be a single val# / range interval.
1721  if (vrm.getPreSplitReg(li.reg)) {
1722    vrm.setIsSplitFromReg(li.reg, 0);
1723    // Unset the split kill marker on the last use.
1724    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1725    if (KillIdx != SlotIndex()) {
1726      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1727      assert(KillMI && "Last use disappeared?");
1728      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1729      assert(KillOp != -1 && "Last use disappeared?");
1730      KillMI->getOperand(KillOp).setIsKill(false);
1731    }
1732    vrm.removeKillPoint(li.reg);
1733    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1734    Slot = vrm.getStackSlot(li.reg);
1735    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1736    MachineInstr *ReMatDefMI = DefIsReMat ?
1737      vrm.getReMaterializedMI(li.reg) : NULL;
1738    int LdSlot = 0;
1739    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1740    bool isLoad = isLoadSS ||
1741      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1742    bool IsFirstRange = true;
1743    for (LiveInterval::Ranges::const_iterator
1744           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1745      // If this is a split live interval with multiple ranges, it means there
1746      // are two-address instructions that re-defined the value. Only the
1747      // first def can be rematerialized!
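      // Editor's sketch (hypothetical vregs) of such a split interval:
      //   %reg1024 = MOV 0             ; first def: rematerializable
      //   %reg1024 = ADD %reg1024, 4   ; two-address redef: must be reloaded
      // Only the range started by the first def may be rematted.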
1748      if (IsFirstRange) {
1749        // Note ReMatOrigDefMI has already been deleted.
1750        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1751                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1752                             false, vrm, rc, ReMatIds, loopInfo,
1753                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1754                             MBBVRegsMap, NewLIs);
1755      } else {
1756        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1757                             Slot, 0, false, false, false,
1758                             false, vrm, rc, ReMatIds, loopInfo,
1759                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1760                             MBBVRegsMap, NewLIs);
1761      }
1762      IsFirstRange = false;
1763    }
1764
1765    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1766    normalizeSpillWeights(NewLIs);
1767    return NewLIs;
1768  }
1769
1770  bool TrySplit = !intervalIsInOneMBB(li);
1771  if (TrySplit)
1772    ++numSplits;
1773  bool NeedStackSlot = false;
1774  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1775       i != e; ++i) {
1776    const VNInfo *VNI = *i;
1777    unsigned VN = VNI->id;
1778    if (VNI->isUnused())
1779      continue; // Dead val#.
1780    // Is the def for the val# rematerializable?
1781    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1782      ? getInstructionFromIndex(VNI->def) : 0;
1783    bool dummy;
1784    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1785      // Remember how to remat the def of this val#.
1786      ReMatOrigDefs[VN] = ReMatDefMI;
1787      // Original def may be modified so we have to make a copy here.
1788      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1789      CloneMIs.push_back(Clone);
1790      ReMatDefs[VN] = Clone;
1791
1792      bool CanDelete = true;
1793      if (VNI->hasPHIKill()) {
1794        // A kill is a phi node; not all of its uses can be rematerialized,
1795        // so it must not be deleted.
1796        CanDelete = false;
1797        // Need a stack slot if there is any live range where uses cannot be
1798        // rematerialized.
1799        NeedStackSlot = true;
1800      }
1801      if (CanDelete)
1802        ReMatDelete.set(VN);
1803    } else {
1804      // Need a stack slot if there is any live range where uses cannot be
1805      // rematerialized.
1806      NeedStackSlot = true;
1807    }
1808  }
1809
1810  // One stack slot per live interval.
1811  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1812    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1813      Slot = vrm.assignVirt2StackSlot(li.reg);
1814
1815    // This case only occurs when the prealloc splitter has already assigned
1816    // a stack slot to this vreg.
1817    else
1818      Slot = vrm.getStackSlot(li.reg);
1819  }
1820
1821  // Create new intervals and rewrite defs and uses.
1822  for (LiveInterval::Ranges::const_iterator
1823         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1824    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1825    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1826    bool DefIsReMat = ReMatDefMI != NULL;
1827    bool CanDelete = ReMatDelete[I->valno->id];
1828    int LdSlot = 0;
1829    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1830    bool isLoad = isLoadSS ||
1831      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1832    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1833                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1834                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1835                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1836                               MBBVRegsMap, NewLIs);
1837  }
1838
1839  // Insert spills / restores if we are splitting.
1840  if (!TrySplit) {
1841    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1842    normalizeSpillWeights(NewLIs);
1843    return NewLIs;
1844  }
1845
1846  SmallPtrSet<LiveInterval*, 4> AddedKill;
1847  SmallVector<unsigned, 2> Ops;
1848  if (NeedStackSlot) {
1849    int Id = SpillMBBs.find_first();
1850    while (Id != -1) {
1851      std::vector<SRInfo> &spills = SpillIdxes[Id];
1852      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1853        SlotIndex index = spills[i].index;
1854        unsigned VReg = spills[i].vreg;
1855        LiveInterval &nI = getOrCreateInterval(VReg);
1856        bool isReMat = vrm.isReMaterialized(VReg);
1857        MachineInstr *MI = getInstructionFromIndex(index);
1858        bool CanFold = false;
1859        bool FoundUse = false;
1860        Ops.clear();
1861        if (spills[i].canFold) {
1862          CanFold = true;
1863          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1864            MachineOperand &MO = MI->getOperand(j);
1865            if (!MO.isReg() || MO.getReg() != VReg)
1866              continue;
1867
1868            Ops.push_back(j);
1869            if (MO.isDef())
1870              continue;
1871            if (isReMat ||
1872                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1873                                                RestoreMBBs, RestoreIdxes))) {
1874              // MI has two-address uses of the same register. If the use
1875              // isn't the first and only use in the BB, then we can't fold
1876              // it. FIXME: Move this to rewriteInstructionsForSpills.
1877              CanFold = false;
1878              break;
1879            }
1880            FoundUse = true;
1881          }
1882        }
1883        // Fold the store into the def if possible.
1884        bool Folded = false;
1885        if (CanFold && !Ops.empty()) {
1886          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1887            Folded = true;
1888            if (FoundUse) {
1889              // Also folded uses, so do not issue a load.
1890              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1891              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1892            }
1893            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1894          }
1895        }
1896
1897        // Otherwise tell the spiller to issue a spill.
1898        if (!Folded) {
1899          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1900          bool isKill = LR->end == index.getStoreIndex();
1901          if (!MI->registerDefIsDead(nI.reg))
1902            // No need to spill a dead def.
1903            vrm.addSpillPoint(VReg, isKill, MI);
1904          if (isKill)
1905            AddedKill.insert(&nI);
1906        }
1907      }
1908      Id = SpillMBBs.find_next(Id);
1909    }
1910  }
1911
1912  int Id = RestoreMBBs.find_first();
1913  while (Id != -1) {
1914    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1915    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1916      SlotIndex index = restores[i].index;
1917      if (index == SlotIndex())
1918        continue;
1919      unsigned VReg = restores[i].vreg;
1920      LiveInterval &nI = getOrCreateInterval(VReg);
1921      bool isReMat = vrm.isReMaterialized(VReg);
1922      MachineInstr *MI = getInstructionFromIndex(index);
1923      bool CanFold = false;
1924      Ops.clear();
1925      if (restores[i].canFold) {
1926        CanFold = true;
1927        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1928          MachineOperand &MO = MI->getOperand(j);
1929          if (!MO.isReg() || MO.getReg() != VReg)
1930            continue;
1931
1932          if (MO.isDef()) {
1933            // If this restore were to be folded, it would have been folded
1934            // already.
1935            CanFold = false;
1936            break;
1937          }
1938          Ops.push_back(j);
1939        }
1940      }
1941
1942      // Fold the load into the use if possible.
1943      bool Folded = false;
1944      if (CanFold && !Ops.empty()) {
1945        if (!isReMat)
1946          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1947        else {
1948          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1949          int LdSlot = 0;
1950          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1951          // If the rematerializable def is a load, also try to fold it.
1952          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1953            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1954                                          Ops, isLoadSS, LdSlot, VReg);
1955          if (!Folded) {
1956            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1957            if (ImpUse) {
1958              // Re-materializing an instruction with a virtual register
1959              // use. Add the register as an implicit use on the use MI and
1960              // mark the register interval as unspillable.
1961              LiveInterval &ImpLi = getInterval(ImpUse);
1962              ImpLi.markNotSpillable();
1963              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1964            }
1965          }
1966        }
1967      }
1968      // If folding is not possible / failed, then tell the spiller to issue a
1969      // load / rematerialization for us.
1970      if (Folded)
1971        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1972      else
1973        vrm.addRestorePoint(VReg, MI);
1974    }
1975    Id = RestoreMBBs.find_next(Id);
1976  }
1977
1978  // Finalize intervals: add kills, finalize spill weights, and filter out
1979  // dead intervals.
1980  std::vector<LiveInterval*> RetNewLIs;
1981  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1982    LiveInterval *LI = NewLIs[i];
1983    if (!LI->empty()) {
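      // Editor's note: normalize by the interval's approximate size in
      // slots (SlotIndex::NUM slots per instruction) so that long,
      // sparsely used intervals end up with lower spill weights.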
1984      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1985      if (!AddedKill.count(LI)) {
1986        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1987        SlotIndex LastUseIdx = LR->end.getBaseIndex();
1988        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1989        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1990        assert(UseIdx != -1);
1991        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1992          LastUse->getOperand(UseIdx).setIsKill();
1993          vrm.addKillPoint(LI->reg, LastUseIdx);
1994        }
1995      }
1996      RetNewLIs.push_back(LI);
1997    }
1998  }
1999
2000  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2001  normalizeSpillWeights(RetNewLIs);
2002  return RetNewLIs;
2003}
2004
2005/// hasAllocatableSuperReg - Return true if the specified physical register has
2006/// any super register that's allocatable.
2007bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2008  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2009    if (allocatableRegs_[*AS] && hasInterval(*AS))
2010      return true;
2011  return false;
2012}
2013
2014/// getRepresentativeReg - Find the largest super register of the specified
2015/// physical register.
2016unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2017  // Find the largest super-register that is allocatable.
2018  unsigned BestReg = Reg;
2019  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2020    unsigned SuperReg = *AS;
2021    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2022      BestReg = SuperReg;
2023      break;
2024    }
2025  }
2026  return BestReg;
2027}
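// Editor's note (hypothetical example): on X86-32 the only super-register
// of AX is EAX; if EAX has an interval (and, being the widest register, no
// allocatable super-register of its own), getRepresentativeReg(AX) returns
// EAX.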
2028
2029/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2030/// specified interval that conflicts with the specified physical register.
2031unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2032                                                   unsigned PhysReg) const {
2033  unsigned NumConflicts = 0;
2034  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2035  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2036         E = mri_->reg_end(); I != E; ++I) {
2037    MachineOperand &O = I.getOperand();
2038    MachineInstr *MI = O.getParent();
2039    if (MI->isDebugValue())
2040      continue;
2041    SlotIndex Index = getInstructionIndex(MI);
2042    if (pli.liveAt(Index))
2043      ++NumConflicts;
2044  }
2045  return NumConflicts;
2046}
2047
2048/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2049/// around all defs and uses of the specified interval. Return true if it
2050/// was able to cut its interval.
2051bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2052                                            unsigned PhysReg, VirtRegMap &vrm) {
2053  unsigned SpillReg = getRepresentativeReg(PhysReg);
2054
2055  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2056    // There may be registers which alias PhysReg but are not sub-registers
2057    // of the chosen representative super register. Assert, since we can't
2058    // handle that case yet.
2059    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2060           tri_->isSuperRegister(*AS, SpillReg));
2061
2062  bool Cut = false;
2063  SmallVector<unsigned, 4> PRegs;
2064  if (hasInterval(SpillReg))
2065    PRegs.push_back(SpillReg);
2066  else {
2067    SmallSet<unsigned, 4> Added;
2068    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2069      if (Added.insert(*AS) && hasInterval(*AS)) {
2070        PRegs.push_back(*AS);
2071        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2072          Added.insert(*ASS);
2073      }
2074  }
2075
2076  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2077  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2078         E = mri_->reg_end(); I != E; ++I) {
2079    MachineOperand &O = I.getOperand();
2080    MachineInstr *MI = O.getParent();
2081    if (MI->isDebugValue() || SeenMIs.count(MI))
2082      continue;
2083    SeenMIs.insert(MI);
2084    SlotIndex Index = getInstructionIndex(MI);
2085    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2086      unsigned PReg = PRegs[i];
2087      LiveInterval &pli = getInterval(PReg);
2088      if (!pli.liveAt(Index))
2089        continue;
2090      vrm.addEmergencySpill(PReg, MI);
2091      SlotIndex StartIdx = Index.getLoadIndex();
2092      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2093      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2094        pli.removeRange(StartIdx, EndIdx);
2095        Cut = true;
2096      } else {
2097        std::string msg;
2098        raw_string_ostream Msg(msg);
2099        Msg << "Ran out of registers during register allocation!";
2100        if (MI->isInlineAsm()) {
2101          Msg << "\nPlease check your inline asm statement for invalid "
2102              << "constraints:\n";
2103          MI->print(Msg, tm_);
2104        }
2105        report_fatal_error(Msg.str());
2106      }
2107      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2108        if (!hasInterval(*AS))
2109          continue;
2110        LiveInterval &spli = getInterval(*AS);
2111        if (spli.liveAt(Index))
2112          spli.removeRange(Index.getLoadIndex(),
2113                           Index.getNextIndex().getBaseIndex());
2114      }
2115    }
2116  }
2117  return Cut;
2118}
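// Editor's note (sketch): e.g. if li must live in EAX but EAX's interval is
// live across one of li's uses, the loop above records an emergency spill
// of EAX around that instruction and carves the instruction's slots out of
// EAX's interval (and those of its live sub-registers).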
2119
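/// addLiveRangeToEndOfBlock - (editor's note) Define reg at startInst and
/// extend its live range to the end of startInst's basic block.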
2120LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2121                                                  MachineInstr* startInst) {
2122  LiveInterval& Interval = getOrCreateInterval(reg);
2123  VNInfo* VN = Interval.getNextValue(
2124    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2125    startInst, true, getVNInfoAllocator());
2126  VN->setHasPHIKill(true);
2127  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2128  LiveRange LR(
2129     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2130     getMBBEndIdx(startInst->getParent()), VN);
2131  Interval.addRange(LR);
2132
2133  return LR;
2134}
2135
2136