RegAllocGreedy.cpp revision 5f2316a3b55f88dab2190212210770180a32aa95
//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineLoopRanges *LoopRanges;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    RS_New,      ///< Never seen before.
    RS_First,    ///< First time in the queue.
    RS_Second,   ///< Second time in the queue.
    RS_Global,   ///< Produced by global splitting.
    RS_Local,    ///< Produced by local splitting.
    RS_Spill     ///< Produced by spilling.
  };
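
  // Rough intuition for the stage machinery above: the worst-case life of a
  // value is RS_New -> RS_First -> RS_Second -> RS_Global -> RS_Local ->
  // RS_Spill. Every range produced by a stage resumes at that stage or later,
  // which is what bounds how often any one value can come back to the queue.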

  static const char *const StageName[];

  IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return LiveRangeStage(LRStage[VirtReg.reg]);
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    LRStage.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (LRStage[Reg] == RS_New)
        LRStage[Reg] = NewStage;
    }
  }

  // Eviction. Sometimes an assigned live range can be evicted without
  // conditions, but other times it must be split after being evicted to avoid
  // infinite loops.
  enum CanEvict {
    CE_Never,    ///< Can never evict.
    CE_Always,   ///< Can always evict.
    CE_WithSplit ///< Can evict only if range is also split or spilled.
  };

  // Splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(unsigned Reg) {
      PhysReg = Reg;
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  /// For every instruction in SA->UseSlots, store the previous non-copy
  /// instruction.
  SmallVector<SlotIndex, 8> PrevSlot;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor);
  float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor);
  void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  SlotIndex getPrevMappedIndex(const MachineInstr*);
  void calcPrevSlots();
  unsigned nextSplitPoint(unsigned);
  CanEvict canEvict(LiveInterval &A, LiveInterval &B);
  bool canEvictInterference(LiveInterval&, unsigned, float&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
  "RS_New",
  "RS_First",
  "RS_Second",
  "RS_Global",
  "RS_Local",
  "RS_Spill"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
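
// For example: with Hysteresis = 0.98, a new candidate must be more than 2%
// cheaper than the current best before it replaces it, so tiny floating-point
// differences cannot flip a decision back and forth between rounds.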


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineLoopRanges>();
  AU.addPreserved<MachineLoopRanges>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. Ensure that the new register gets the
  // same stage as the parent.
  LRStage.grow(New);
  LRStage[New] = LRStage[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  LRStage.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  LRStage.grow(Reg);
  if (LRStage[Reg] == RS_New)
    LRStage[Reg] = RS_First;

  if (LRStage[Reg] == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  else {
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}
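
// A sketch of how the priority word above sorts in the max-heap: first-round
// ranges get bit 31 set, so they all pop before deferred RS_Second ranges;
// bit 30 boosts hinted ranges; the low bits order by size. Adding Size pops
// long ranges first, while the subtraction for deferred ranges inverts that
// so their short ranges come out first.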

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available. Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// canEvict - determine if A can evict the assigned live range B. The eviction
/// policy defined by this function together with the allocation order defined
/// by enqueue() decides which registers ultimately end up being split and
/// spilled.
///
/// This function must define a non-circular relation when it returns CE_Always,
/// otherwise infinite eviction loops are possible. When evicting a <= RS_Second
/// range, it is possible to return CE_WithSplit which forces the evicted
/// register to be split or spilled before it can evict anything again. That
/// guarantees progress.
RAGreedy::CanEvict RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) {
  return A.weight > B.weight ? CE_Always : CE_Never;
}
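
// Note that the strict A.weight > B.weight test above makes the CE_Always
// relation acyclic: if A can evict B, nothing B later evicts can circle back
// to A, since eviction chains strictly descend in weight and must terminate.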

/// canEvictInterference - Return true if all interferences between VirtReg
/// and PhysReg can be evicted.
/// Return false if any interference is heavier than MaxWeight.
/// On return, set MaxWeight to the maximal spill weight of an interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    float &MaxWeight) {
  float Weight = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      if (Intf->weight >= MaxWeight)
        return false;
      switch (canEvict(VirtReg, *Intf)) {
      case CE_Always:
        break;
      case CE_Never:
        return false;
      case CE_WithSplit:
        if (getStage(*Intf) > RS_Second)
          return false;
        break;
      }
      Weight = std::max(Weight, Intf->weight);
    }
  }
  MaxWeight = Weight;
  return true;
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the lightest single interference seen so far.
  float BestWeight = HUGE_VALF;
  unsigned BestPhys = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a register in a function has cost 1.
    if (CostPerUseLimit == 1 && !MRI->isPhysRegUsed(PhysReg))
      continue;

    float Weight = BestWeight;
    if (!canEvictInterference(VirtReg, PhysReg, Weight))
      continue;

    // This is an eviction candidate.
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
                 << Weight << '\n');
    if (BestPhys && Weight >= BestWeight)
      continue;

    // Best so far.
    BestPhys = PhysReg;
    BestWeight = Weight;
    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
  for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      ++NumEvicted;
      NewVRegs.push_back(Intf);
      // Prevent looping by forcing the evicted ranges to be split before they
      // can evict anything else.
      if (getStage(*Intf) < RS_Second &&
          canEvict(VirtReg, *Intf) == CE_WithSplit)
        LRStage[Intf->reg] = RS_Second;
    }
  }
  return BestPhys;
}
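
// One subtlety in the loop above: Weight is seeded with BestWeight before
// calling canEvictInterference(), so any candidate whose heaviest
// interference is not strictly lighter than the current best is rejected
// early, without scanning all of its interferences.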


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastUse)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstUse)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
}
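
// The GroupSize batching above appears to be a simple buffering scheme: BCS
// and TBS stay small fixed-size stack arrays, and constraints/links are
// flushed to SpillPlacer in chunks of 8 instead of one call per block.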

void RAGreedy::growRegion(GlobalSplitCandidate &Cand,
                          InterferenceCache::Cursor Intf) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    if (NewBundles.empty())
      break;
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() > AddedTo) {
      ArrayRef<unsigned> Add(&ActiveBlocks[AddedTo],
                             ActiveBlocks.size() - AddedTo);
      addThroughConstraints(Intf, Add);
      AddedTo = ActiveBlocks.size();
    }
    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}
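
// growRegion() is in effect a fixed-point iteration: each SpillPlacer round
// may turn more bundles positive, exposing new through blocks, whose
// constraints feed the next round. The loop stops when getRecentPositive()
// comes back empty.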

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  const LiveInterval &LI = SA->getParent();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut) {
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(Number);
      LiveInterval::const_iterator I = LI.find(Start);
      assert(I != LI.end() && "Expected live-in value");
      // Is there a different live-out value? If so, we need an extra spill
      // instruction.
      if (I->end < Stop)
        Cost += SpillPlacer->getBlockFrequency(Number);
    }
  }
  return Cost;
}
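
// A rough worked example of the cost model above: a use block with frequency
// 2.0 normally contributes 2.0 (one reload or store), but if the range is
// live-in, redefined inside the block, and live-out, the live-in and live-out
// values differ and the block contributes 2 * 2.0 = 4.0.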

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
                                    InterferenceCache::Cursor Intf) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Intf.moveToBlock(Number);
      if (Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}
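
// In other words, each active block pays once per register<->stack transition:
// a block that enters in a register and leaves on the stack (or vice versa)
// costs one block frequency, while a block that keeps the register but
// contains interference must spill around it, costing two.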

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
                                 GlobalSplitCandidate &Cand,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  const BitVector &LiveBundles = Cand.LiveBundles;

  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg);
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First add all defs that are live out of a block.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    if (!RegIn && !RegOut && BI.FirstUse != BI.LastUse) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      SE->splitSingleBlock(BI);
      SE->selectIntv(MainIntv);
      continue;
    }

    // Should the register be live out?
    if (!BI.LiveOut || !RegOut)
      continue;

    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
                 << Bundles->getBundle(BI.MBB->getNumber(), 1)
                 << " [" << Start << ';'
                 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
                 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');

    // The interference interval should either be invalid or overlap MBB.
    assert((!Intf.hasInterference() || Intf.first() < Stop)
           && "Bad interference");
    assert((!Intf.hasInterference() || Intf.last() > Start)
           && "Bad interference");

    // Check interference leaving the block.
    if (!Intf.hasInterference()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", not live-through.\n");
        SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
        continue;
      }
      if (!RegIn) {
        // Block is live-through, but entry bundle is on the stack.
        // Reload just before the first use.
        DEBUG(dbgs() << ", not live-in, enter before first use.\n");
        SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
        continue;
      }
      DEBUG(dbgs() << ", live-through.\n");
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference to " << Intf.last());

    if (!BI.LiveThrough && Intf.last() <= BI.FirstUse) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect def from " << BI.FirstUse << '\n');
      SE->useIntv(BI.FirstUse, Stop);
      continue;
    }

    SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
    if (Intf.last().getBoundaryIndex() < BI.LastUse) {
      // There are interference-free uses at the end of the block.
      // Find the first use that can get the live-out register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         Intf.last().getBoundaryIndex());
      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
      SlotIndex Use = *UI;
      assert(Use <= BI.LastUse && "Couldn't find last use");
      // Only attempt a split before the last split point.
      if (Use.getBaseIndex() <= LastSplitPoint) {
        DEBUG(dbgs() << ", free use at " << Use << ".\n");
        SlotIndex SegStart = SE->enterIntvBefore(Use);
        assert(SegStart >= Intf.last() && "Couldn't avoid interference");
        assert(SegStart < LastSplitPoint && "Impossible split point");
        SE->useIntv(SegStart, Stop);
        continue;
      }
    }

    // Interference is after the last use.
    DEBUG(dbgs() << " after last use.\n");
    SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
    assert(SegStart >= Intf.last() && "Couldn't avoid interference");
  }

  // Now all defs leading to live bundles are handled, do everything else.
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Is the register live-in?
    if (!BI.LiveIn || !RegIn)
      continue;

    // We have an incoming register. Check for interference.
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
                 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
                 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
                 << ')');

    // Check interference entering the block.
    if (!Intf.hasInterference()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", killed in block.\n");
        SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
        continue;
      }
      if (!RegOut) {
        SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
        // Block is live-through, but exit bundle is on the stack.
        // Spill immediately after the last use.
        if (BI.LastUse < LastSplitPoint) {
          DEBUG(dbgs() << ", uses, stack-out.\n");
          SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
          continue;
        }
        // The last use is after the last split point, it is probably an
        // indirect jump.
        DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
                     << LastSplitPoint << ", stack-out.\n");
        SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
        SE->useIntv(Start, SegEnd);
        // Run a double interval from the split to the last use.
        // This makes it possible to spill the complement without affecting the
        // indirect branch.
        SE->overlapIntv(SegEnd, BI.LastUse);
        continue;
      }
      // Register is live-through.
      DEBUG(dbgs() << ", uses, live-through.\n");
      SE->useIntv(Start, Stop);
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference from " << Intf.first());

    if (!BI.LiveThrough && Intf.first() >= BI.LastUse) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect kill at " << BI.LastUse << '\n');
      SE->useIntv(Start, BI.LastUse);
      continue;
    }

    if (Intf.first().getBaseIndex() > BI.FirstUse) {
      // There are interference-free uses at the beginning of the block.
      // Find the last use that can get the register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         Intf.first().getBaseIndex());
      assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
      SlotIndex Use = (--UI)->getBoundaryIndex();
      DEBUG(dbgs() << ", free use at " << *UI << ".\n");
      SlotIndex SegEnd = SE->leaveIntvAfter(Use);
      assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
      SE->useIntv(Start, SegEnd);
      continue;
    }

    // Interference is before the first use.
    DEBUG(dbgs() << " before first use.\n");
    SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
    assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
  }

  // Handle live-through blocks.
  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    DEBUG(dbgs() << "Live through BB#" << Number << '\n');
    if (RegIn && RegOut) {
      Intf.moveToBlock(Number);
      if (!Intf.hasInterference()) {
        SE->useIntv(Indexes->getMBBStartIdx(Number),
                    Indexes->getMBBEndIdx(Number));
        continue;
      }
    }
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    if (RegIn)
      SE->leaveIntvAtTop(*MBB);
    if (RegOut)
      SE->enterIntvAtEnd(*MBB);
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  LRStage.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    unsigned Reg = LREdit.get(i)->reg;

    // Ignore old intervals from DCE.
    if (LRStage[Reg] != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      LRStage[Reg] = RS_Global;
      continue;
    }

    // Main interval. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] == MainIntv) {
      if (SA->countLiveBlocks(LREdit.get(i)) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        LRStage[Reg] = RS_Global;
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  float BestCost = Hysteresis * calcSpillCost();
  DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  const unsigned NoCand = ~0u;
  unsigned BestCand = NoCand;

  Order.rewind();
  for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
    if (GlobalCand.size() <= Cand)
      GlobalCand.resize(Cand+1);
    GlobalCand[Cand].reset(PhysReg);

    SpillPlacer->prepare(GlobalCand[Cand].LiveBundles);
    float Cost;
    InterferenceCache::Cursor Intf(IntfCache, PhysReg);
    if (!addSplitConstraints(Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(GlobalCand[Cand], Intf);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!GlobalCand[Cand].LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0;
           i = GlobalCand[Cand].LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = Cand;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
  }

  if (BestCand == NoCand)
    return 0;

  splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
  return 0;
}
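
// Note that tryRegionSplit() signals success through NewVRegs rather than its
// return value: the split pieces are queued for another allocation round, and
// the caller treats a zero return with non-empty NewVRegs as progress.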


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
/// before MI that has a slot index. If MI is the first mapped instruction in
/// its block, return the block start index instead.
///
SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
  assert(MI && "Missing MachineInstr");
  const MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
  while (I != B)
    if (!(--I)->isDebugValue() && !I->isCopy())
      return Indexes->getInstructionIndex(I);
  return Indexes->getMBBStartIdx(MBB);
}

/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
/// real non-copy instruction for each instruction in SA->UseSlots.
///
void RAGreedy::calcPrevSlots() {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  PrevSlot.clear();
  PrevSlot.reserve(Uses.size());
  for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
    const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
    PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
  }
}

/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
/// be beneficial to split before UseSlots[i].
///
/// 0 is always a valid split point.
unsigned RAGreedy::nextSplitPoint(unsigned i) {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned Size = Uses.size();
  assert(i != Size && "No split points after the end");
  // Allow split before i when Uses[i] is not adjacent to the previous use.
  while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
    ;
  return i;
}
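
// For illustration: if Uses[1] immediately follows Uses[0] with no real
// non-copy instruction in between, then PrevSlot[1] <= Uses[0] and the loop
// skips index 1. Splitting between two adjacent uses could not free up any
// instructions, so it is never a useful split point.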

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - a phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case; we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // For every use, find the previous mapped non-copy instruction.
  // We use this to detect valid split points, and to estimate new interval
  // sizes.
  calcPrevSlots();

  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];
    for (unsigned i = 1; i != SplitAfter; ++i)
      MaxGap = std::max(MaxGap, GapWeight[i]);

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;
      if (MaxGap < HUGE_VALF) {
        // Estimate the new spill weight.
        //
        // Each instruction reads and writes the register, except the first
        // instr doesn't read when !FirstLive, and the last instr doesn't write
        // when !LastLive.
        //
        // We will be inserting copies before and after, so the total number of
        // reads and writes is 2 * EstUses.
        //
        const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
                                 2*(LiveBefore + LiveAfter);

        // Try to guess the size of the new interval. This should be trivial,
        // but the slot index of an inserted copy can be a lot smaller than the
        // instruction it is inserted before if there are many dead indexes
        // between them.
        //
        // We measure the distance from the instruction before SplitBefore to
        // get a conservative estimate.
        //
        // The final distance can still be different if inserting copies
        // triggers a slot index renumbering.
        //
        const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
                              PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        SplitBefore = nextSplitPoint(SplitBefore);
        if (SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
           SplitAfter != e; ++SplitAfter)
        MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
      continue;
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SE->finish();
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  if (getStage(VirtReg) >= RS_Global)
    return 0;

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage] << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Second ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Second)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage == RS_First) {
    LRStage[VirtReg.reg] = RS_Second;
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Spill)
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  LoopRanges = &getAnalysis<MachineLoopRanges>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  LRStage.clear();
  LRStage.resize(MRI->getNumVirtRegs());
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run the rewriter.
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  DebugVars->emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}