RegAllocGreedy.cpp revision 549019792a8b14500cab093ac8f3c5f7331e86d7
1//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the RAGreedy function pass for register allocation in 11// optimized builds. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "regalloc" 16#include "AllocationOrder.h" 17#include "InterferenceCache.h" 18#include "LiveDebugVariables.h" 19#include "LiveRangeEdit.h" 20#include "RegAllocBase.h" 21#include "Spiller.h" 22#include "SpillPlacement.h" 23#include "SplitKit.h" 24#include "VirtRegMap.h" 25#include "RegisterCoalescer.h" 26#include "llvm/ADT/Statistic.h" 27#include "llvm/Analysis/AliasAnalysis.h" 28#include "llvm/Function.h" 29#include "llvm/PassAnalysisSupport.h" 30#include "llvm/CodeGen/CalcSpillWeights.h" 31#include "llvm/CodeGen/EdgeBundles.h" 32#include "llvm/CodeGen/LiveIntervalAnalysis.h" 33#include "llvm/CodeGen/LiveStackAnalysis.h" 34#include "llvm/CodeGen/MachineDominators.h" 35#include "llvm/CodeGen/MachineFunctionPass.h" 36#include "llvm/CodeGen/MachineLoopInfo.h" 37#include "llvm/CodeGen/MachineLoopRanges.h" 38#include "llvm/CodeGen/MachineRegisterInfo.h" 39#include "llvm/CodeGen/Passes.h" 40#include "llvm/CodeGen/RegAllocRegistry.h" 41#include "llvm/Target/TargetOptions.h" 42#include "llvm/Support/Debug.h" 43#include "llvm/Support/ErrorHandling.h" 44#include "llvm/Support/raw_ostream.h" 45#include "llvm/Support/Timer.h" 46 47#include <queue> 48 49using namespace llvm; 50 51STATISTIC(NumGlobalSplits, "Number of split global live ranges"); 52STATISTIC(NumLocalSplits, "Number of split local live ranges"); 53STATISTIC(NumEvicted, "Number of interferences evicted"); 54 55static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", 56 createGreedyRegisterAllocator); 57 58namespace { 59class RAGreedy : public MachineFunctionPass, 60 public RegAllocBase, 61 private LiveRangeEdit::Delegate { 62 63 // context 64 MachineFunction *MF; 65 66 // analyses 67 SlotIndexes *Indexes; 68 LiveStacks *LS; 69 MachineDominatorTree *DomTree; 70 MachineLoopInfo *Loops; 71 MachineLoopRanges *LoopRanges; 72 EdgeBundles *Bundles; 73 SpillPlacement *SpillPlacer; 74 LiveDebugVariables *DebugVars; 75 76 // state 77 std::auto_ptr<Spiller> SpillerInstance; 78 std::priority_queue<std::pair<unsigned, unsigned> > Queue; 79 unsigned NextCascade; 80 81 // Live ranges pass through a number of stages as we try to allocate them. 82 // Some of the stages may also create new live ranges: 83 // 84 // - Region splitting. 85 // - Per-block splitting. 86 // - Local splitting. 87 // - Spilling. 88 // 89 // Ranges produced by one of the stages skip the previous stages when they are 90 // dequeued. This improves performance because we can skip interference checks 91 // that are unlikely to give any results. It also guarantees that the live 92 // range splitting algorithm terminates, something that is otherwise hard to 93 // ensure. 94 enum LiveRangeStage { 95 RS_New, ///< Never seen before. 96 RS_First, ///< First time in the queue. 97 RS_Second, ///< Second time in the queue. 98 RS_Global, ///< Produced by global splitting. 99 RS_Local, ///< Produced by local splitting. 100 RS_Spill ///< Produced by spilling. 
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(unsigned Reg) {
      PhysReg = Reg;
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
182 virtual bool runOnMachineFunction(MachineFunction &mf); 183 184 static char ID; 185 186private: 187 void LRE_WillEraseInstruction(MachineInstr*); 188 bool LRE_CanEraseVirtReg(unsigned); 189 void LRE_WillShrinkVirtReg(unsigned); 190 void LRE_DidCloneVirtReg(unsigned, unsigned); 191 192 float calcSpillCost(); 193 bool addSplitConstraints(InterferenceCache::Cursor, float&); 194 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>); 195 void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor); 196 float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor); 197 void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&, 198 SmallVectorImpl<LiveInterval*>&); 199 void calcGapWeights(unsigned, SmallVectorImpl<float>&); 200 bool canEvict(LiveInterval &A, LiveInterval &B); 201 bool canEvictInterference(LiveInterval&, unsigned, float&); 202 203 unsigned tryAssign(LiveInterval&, AllocationOrder&, 204 SmallVectorImpl<LiveInterval*>&); 205 unsigned tryEvict(LiveInterval&, AllocationOrder&, 206 SmallVectorImpl<LiveInterval*>&, unsigned = ~0u); 207 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&, 208 SmallVectorImpl<LiveInterval*>&); 209 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&, 210 SmallVectorImpl<LiveInterval*>&); 211 unsigned trySplit(LiveInterval&, AllocationOrder&, 212 SmallVectorImpl<LiveInterval*>&); 213}; 214} // end anonymous namespace 215 216char RAGreedy::ID = 0; 217 218#ifndef NDEBUG 219const char *const RAGreedy::StageName[] = { 220 "RS_New", 221 "RS_First", 222 "RS_Second", 223 "RS_Global", 224 "RS_Local", 225 "RS_Spill" 226}; 227#endif 228 229// Hysteresis to use when comparing floats. 230// This helps stabilize decisions based on float comparisons. 231const float Hysteresis = 0.98f; 232 233 234FunctionPass* llvm::createGreedyRegisterAllocator() { 235 return new RAGreedy(); 236} 237 238RAGreedy::RAGreedy(): MachineFunctionPass(ID) { 239 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry()); 240 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 241 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); 242 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 243 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry()); 244 initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry()); 245 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry()); 246 initializeLiveStacksPass(*PassRegistry::getPassRegistry()); 247 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry()); 248 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry()); 249 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry()); 250 initializeVirtRegMapPass(*PassRegistry::getPassRegistry()); 251 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry()); 252 initializeSpillPlacementPass(*PassRegistry::getPassRegistry()); 253} 254 255void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { 256 AU.setPreservesCFG(); 257 AU.addRequired<AliasAnalysis>(); 258 AU.addPreserved<AliasAnalysis>(); 259 AU.addRequired<LiveIntervals>(); 260 AU.addRequired<SlotIndexes>(); 261 AU.addPreserved<SlotIndexes>(); 262 AU.addRequired<LiveDebugVariables>(); 263 AU.addPreserved<LiveDebugVariables>(); 264 if (StrongPHIElim) 265 AU.addRequiredID(StrongPHIEliminationID); 266 AU.addRequiredTransitive<RegisterCoalescer>(); 267 AU.addRequired<CalculateSpillWeights>(); 268 AU.addRequired<LiveStacks>(); 269 AU.addPreserved<LiveStacks>(); 270 AU.addRequired<MachineDominatorTree>(); 271 
AU.addPreserved<MachineDominatorTree>(); 272 AU.addRequired<MachineLoopInfo>(); 273 AU.addPreserved<MachineLoopInfo>(); 274 AU.addRequired<MachineLoopRanges>(); 275 AU.addPreserved<MachineLoopRanges>(); 276 AU.addRequired<VirtRegMap>(); 277 AU.addPreserved<VirtRegMap>(); 278 AU.addRequired<EdgeBundles>(); 279 AU.addRequired<SpillPlacement>(); 280 MachineFunctionPass::getAnalysisUsage(AU); 281} 282 283 284//===----------------------------------------------------------------------===// 285// LiveRangeEdit delegate methods 286//===----------------------------------------------------------------------===// 287 288void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) { 289 // LRE itself will remove from SlotIndexes and parent basic block. 290 VRM->RemoveMachineInstrFromMaps(MI); 291} 292 293bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) { 294 if (unsigned PhysReg = VRM->getPhys(VirtReg)) { 295 unassign(LIS->getInterval(VirtReg), PhysReg); 296 return true; 297 } 298 // Unassigned virtreg is probably in the priority queue. 299 // RegAllocBase will erase it after dequeueing. 300 return false; 301} 302 303void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) { 304 unsigned PhysReg = VRM->getPhys(VirtReg); 305 if (!PhysReg) 306 return; 307 308 // Register is assigned, put it back on the queue for reassignment. 309 LiveInterval &LI = LIS->getInterval(VirtReg); 310 unassign(LI, PhysReg); 311 enqueue(&LI); 312} 313 314void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) { 315 // LRE may clone a virtual register because dead code elimination causes it to 316 // be split into connected components. Ensure that the new register gets the 317 // same stage as the parent. 318 ExtraRegInfo.grow(New); 319 ExtraRegInfo[New] = ExtraRegInfo[Old]; 320} 321 322void RAGreedy::releaseMemory() { 323 SpillerInstance.reset(0); 324 ExtraRegInfo.clear(); 325 GlobalCand.clear(); 326 RegAllocBase::releaseMemory(); 327} 328 329void RAGreedy::enqueue(LiveInterval *LI) { 330 // Prioritize live ranges by size, assigning larger ranges first. 331 // The queue holds (size, reg) pairs. 332 const unsigned Size = LI->getSize(); 333 const unsigned Reg = LI->reg; 334 assert(TargetRegisterInfo::isVirtualRegister(Reg) && 335 "Can only enqueue virtual registers"); 336 unsigned Prio; 337 338 ExtraRegInfo.grow(Reg); 339 if (ExtraRegInfo[Reg].Stage == RS_New) 340 ExtraRegInfo[Reg].Stage = RS_First; 341 342 if (ExtraRegInfo[Reg].Stage == RS_Second) 343 // Unsplit ranges that couldn't be allocated immediately are deferred until 344 // everything else has been allocated. Long ranges are allocated last so 345 // they are split against realistic interference. 346 Prio = (1u << 31) - Size; 347 else { 348 // Everything else is allocated in long->short order. Long ranges that don't 349 // fit should be spilled ASAP so they don't create interference. 350 Prio = (1u << 31) + Size; 351 352 // Boost ranges that have a physical register hint. 
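    // The queue is a max-heap on (Prio, Reg). First-round ranges get
    // (1u << 31) + Size and are allocated longest-first; RS_Second ranges get
    // (1u << 31) - Size, sorting below everything else with the longest ones
    // last. Setting bit 30 lifts hinted first-round ranges above unhinted
    // ones, assuming Size stays well below 2^30.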
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
// Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available. Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//

/// canEvict - determine if A can evict the assigned live range B. The eviction
/// policy defined by this function together with the allocation order defined
/// by enqueue() decides which registers ultimately end up being split and
/// spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
bool RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) {
  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
/// Return false if any interference is heavier than MaxWeight.
/// On return, set MaxWeight to the maximal spill weight of an interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    float &MaxWeight) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  float Weight = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
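    // MaxWeight comes in holding the best (lightest) eviction weight found so
    // far by tryEvict, so one interference at or above it already makes this
    // PhysReg no better than the current best candidate.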
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      if (Cascade <= ExtraRegInfo[Intf->reg].Cascade)
        return false;
      if (Intf->weight >= MaxWeight)
        return false;
      if (!canEvict(VirtReg, *Intf))
        return false;
      Weight = std::max(Weight, Intf->weight);
    }
  }
  MaxWeight = Weight;
  return true;
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
/// @return Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the lightest single interference seen so far.
  float BestWeight = HUGE_VALF;
  unsigned BestPhys = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a register in a function has cost 1.
    if (CostPerUseLimit == 1 && !MRI->isPhysRegUsed(PhysReg))
      continue;

    float Weight = BestWeight;
    if (!canEvictInterference(VirtReg, PhysReg, Weight))
      continue;

    // This is an eviction candidate.
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
                 << Weight << '\n');
    if (BestPhys && Weight >= BestWeight)
      continue;

    // Best so far.
    BestPhys = PhysReg;
    BestWeight = Weight;
    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  // We will evict interference. Make sure that VirtReg has a cascade number,
  // and assign that cascade number to every evicted register. These live
  // ranges can then only be evicted by a newer cascade, preventing infinite
  // loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert(ExtraRegInfo[Intf->reg].Cascade < Cascade &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
  return BestPhys;
}


//===----------------------------------------------------------------------===//
// Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
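/// (A bundle has a positive bias when the constraints added so far make it
/// profitable to keep the current candidate register live there; a region
/// split candidate with no such bundles is dropped by tryRegionSplit.)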
535bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf, 536 float &Cost) { 537 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 538 539 // Reset interference dependent info. 540 SplitConstraints.resize(UseBlocks.size()); 541 float StaticCost = 0; 542 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 543 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 544 SpillPlacement::BlockConstraint &BC = SplitConstraints[i]; 545 546 BC.Number = BI.MBB->getNumber(); 547 Intf.moveToBlock(BC.Number); 548 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare; 549 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare; 550 551 if (!Intf.hasInterference()) 552 continue; 553 554 // Number of spill code instructions to insert. 555 unsigned Ins = 0; 556 557 // Interference for the live-in value. 558 if (BI.LiveIn) { 559 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) 560 BC.Entry = SpillPlacement::MustSpill, ++Ins; 561 else if (Intf.first() < BI.FirstUse) 562 BC.Entry = SpillPlacement::PrefSpill, ++Ins; 563 else if (Intf.first() < BI.LastUse) 564 ++Ins; 565 } 566 567 // Interference for the live-out value. 568 if (BI.LiveOut) { 569 if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) 570 BC.Exit = SpillPlacement::MustSpill, ++Ins; 571 else if (Intf.last() > BI.LastUse) 572 BC.Exit = SpillPlacement::PrefSpill, ++Ins; 573 else if (Intf.last() > BI.FirstUse) 574 ++Ins; 575 } 576 577 // Accumulate the total frequency of inserted spill code. 578 if (Ins) 579 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number); 580 } 581 Cost = StaticCost; 582 583 // Add constraints for use-blocks. Note that these are the only constraints 584 // that may add a positive bias, it is downhill from here. 585 SpillPlacer->addConstraints(SplitConstraints); 586 return SpillPlacer->scanActiveBundles(); 587} 588 589 590/// addThroughConstraints - Add constraints and links to SpillPlacer from the 591/// live-through blocks in Blocks. 592void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf, 593 ArrayRef<unsigned> Blocks) { 594 const unsigned GroupSize = 8; 595 SpillPlacement::BlockConstraint BCS[GroupSize]; 596 unsigned TBS[GroupSize]; 597 unsigned B = 0, T = 0; 598 599 for (unsigned i = 0; i != Blocks.size(); ++i) { 600 unsigned Number = Blocks[i]; 601 Intf.moveToBlock(Number); 602 603 if (!Intf.hasInterference()) { 604 assert(T < GroupSize && "Array overflow"); 605 TBS[T] = Number; 606 if (++T == GroupSize) { 607 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T)); 608 T = 0; 609 } 610 continue; 611 } 612 613 assert(B < GroupSize && "Array overflow"); 614 BCS[B].Number = Number; 615 616 // Interference for the live-in value. 617 if (Intf.first() <= Indexes->getMBBStartIdx(Number)) 618 BCS[B].Entry = SpillPlacement::MustSpill; 619 else 620 BCS[B].Entry = SpillPlacement::PrefSpill; 621 622 // Interference for the live-out value. 
623 if (Intf.last() >= SA->getLastSplitPoint(Number)) 624 BCS[B].Exit = SpillPlacement::MustSpill; 625 else 626 BCS[B].Exit = SpillPlacement::PrefSpill; 627 628 if (++B == GroupSize) { 629 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); 630 SpillPlacer->addConstraints(Array); 631 B = 0; 632 } 633 } 634 635 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); 636 SpillPlacer->addConstraints(Array); 637 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T)); 638} 639 640void RAGreedy::growRegion(GlobalSplitCandidate &Cand, 641 InterferenceCache::Cursor Intf) { 642 // Keep track of through blocks that have not been added to SpillPlacer. 643 BitVector Todo = SA->getThroughBlocks(); 644 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks; 645 unsigned AddedTo = 0; 646#ifndef NDEBUG 647 unsigned Visited = 0; 648#endif 649 650 for (;;) { 651 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive(); 652 // Find new through blocks in the periphery of PrefRegBundles. 653 for (int i = 0, e = NewBundles.size(); i != e; ++i) { 654 unsigned Bundle = NewBundles[i]; 655 // Look at all blocks connected to Bundle in the full graph. 656 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle); 657 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end(); 658 I != E; ++I) { 659 unsigned Block = *I; 660 if (!Todo.test(Block)) 661 continue; 662 Todo.reset(Block); 663 // This is a new through block. Add it to SpillPlacer later. 664 ActiveBlocks.push_back(Block); 665#ifndef NDEBUG 666 ++Visited; 667#endif 668 } 669 } 670 // Any new blocks to add? 671 if (ActiveBlocks.size() == AddedTo) 672 break; 673 addThroughConstraints(Intf, 674 ArrayRef<unsigned>(ActiveBlocks).slice(AddedTo)); 675 AddedTo = ActiveBlocks.size(); 676 677 // Perhaps iterating can enable more bundles? 678 SpillPlacer->iterate(); 679 } 680 DEBUG(dbgs() << ", v=" << Visited); 681} 682 683/// calcSpillCost - Compute how expensive it would be to split the live range in 684/// SA around all use blocks instead of forming bundle regions. 685float RAGreedy::calcSpillCost() { 686 float Cost = 0; 687 const LiveInterval &LI = SA->getParent(); 688 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 689 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 690 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 691 unsigned Number = BI.MBB->getNumber(); 692 // We normally only need one spill instruction - a load or a store. 693 Cost += SpillPlacer->getBlockFrequency(Number); 694 695 // Unless the value is redefined in the block. 696 if (BI.LiveIn && BI.LiveOut) { 697 SlotIndex Start, Stop; 698 tie(Start, Stop) = Indexes->getMBBRange(Number); 699 LiveInterval::const_iterator I = LI.find(Start); 700 assert(I != LI.end() && "Expected live-in value"); 701 // Is there a different live-out value? If so, we need an extra spill 702 // instruction. 703 if (I->end < Stop) 704 Cost += SpillPlacer->getBlockFrequency(Number); 705 } 706 } 707 return Cost; 708} 709 710/// calcGlobalSplitCost - Return the global split cost of following the split 711/// pattern in LiveBundles. This cost should be added to the local cost of the 712/// interference pattern in SplitConstraints. 
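/// The cost charges one unit of spill code per block boundary where the bundle
/// solution disagrees with the block's register preference, plus the copies
/// needed in live-through blocks: one to enter or leave the register, or two
/// when the value must be spilled and reloaded around interference. All terms
/// are weighted by block frequency.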
713/// 714float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand, 715 InterferenceCache::Cursor Intf) { 716 float GlobalCost = 0; 717 const BitVector &LiveBundles = Cand.LiveBundles; 718 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 719 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 720 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 721 SpillPlacement::BlockConstraint &BC = SplitConstraints[i]; 722 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)]; 723 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)]; 724 unsigned Ins = 0; 725 726 if (BI.LiveIn) 727 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg); 728 if (BI.LiveOut) 729 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg); 730 if (Ins) 731 GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number); 732 } 733 734 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) { 735 unsigned Number = Cand.ActiveBlocks[i]; 736 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; 737 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; 738 if (!RegIn && !RegOut) 739 continue; 740 if (RegIn && RegOut) { 741 // We need double spill code if this block has interference. 742 Intf.moveToBlock(Number); 743 if (Intf.hasInterference()) 744 GlobalCost += 2*SpillPlacer->getBlockFrequency(Number); 745 continue; 746 } 747 // live-in / stack-out or stack-in live-out. 748 GlobalCost += SpillPlacer->getBlockFrequency(Number); 749 } 750 return GlobalCost; 751} 752 753/// splitAroundRegion - Split VirtReg around the region determined by 754/// LiveBundles. Make an effort to avoid interference from PhysReg. 755/// 756/// The 'register' interval is going to contain as many uses as possible while 757/// avoiding interference. The 'stack' interval is the complement constructed by 758/// SplitEditor. It will contain the rest. 759/// 760void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, 761 GlobalSplitCandidate &Cand, 762 SmallVectorImpl<LiveInterval*> &NewVRegs) { 763 const BitVector &LiveBundles = Cand.LiveBundles; 764 765 DEBUG({ 766 dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI) 767 << " with bundles"; 768 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i)) 769 dbgs() << " EB#" << i; 770 dbgs() << ".\n"; 771 }); 772 773 InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg); 774 LiveRangeEdit LREdit(VirtReg, NewVRegs, this); 775 SE->reset(LREdit); 776 777 // Create the main cross-block interval. 778 const unsigned MainIntv = SE->openIntv(); 779 780 // First handle all the blocks with uses. 781 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 782 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 783 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 784 bool RegIn = BI.LiveIn && 785 LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)]; 786 bool RegOut = BI.LiveOut && 787 LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)]; 788 789 // Create separate intervals for isolated blocks with multiple uses. 790 // 791 // |---o---o---| Enter and leave on the stack. 792 // ____-----____ Create local interval for uses. 793 // 794 // | o---o---| Defined in block, leave on stack. 795 // -----____ Create local interval for uses. 796 // 797 // |---o---x | Enter on stack, killed in block. 798 // ____----- Create local interval for uses. 
799 // 800 if (!RegIn && !RegOut) { 801 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); 802 if (!BI.isOneInstr()) { 803 SE->splitSingleBlock(BI); 804 SE->selectIntv(MainIntv); 805 } 806 continue; 807 } 808 809 SlotIndex Start, Stop; 810 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB); 811 Intf.moveToBlock(BI.MBB->getNumber()); 812 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0) 813 << (BI.LiveIn ? (RegIn ? " => " : " -> ") : " ") 814 << "BB#" << BI.MBB->getNumber() 815 << (BI.LiveOut ? (RegOut ? " => " : " -> ") : " ") 816 << " EB#" << Bundles->getBundle(BI.MBB->getNumber(), 1) 817 << " [" << Start << ';' 818 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop 819 << ") uses [" << BI.FirstUse << ';' << BI.LastUse 820 << ") intf [" << Intf.first() << ';' << Intf.last() << ')'); 821 822 // The interference interval should either be invalid or overlap MBB. 823 assert((!Intf.hasInterference() || Intf.first() < Stop) 824 && "Bad interference"); 825 assert((!Intf.hasInterference() || Intf.last() > Start) 826 && "Bad interference"); 827 828 // We are now ready to decide where to split in the current block. There 829 // are many variables guiding the decision: 830 // 831 // - RegIn / RegOut: The global splitting algorithm's decisions for our 832 // ingoing and outgoing bundles. 833 // 834 // - BI.BlockIn / BI.BlockOut: Is the live range live-in and/or live-out 835 // from this block. 836 // 837 // - Intf.hasInterference(): Is there interference in this block. 838 // 839 // - Intf.first() / Inft.last(): The range of interference. 840 // 841 // The live range should be split such that MainIntv is live-in when RegIn 842 // is set, and live-out when RegOut is set. MainIntv should never overlap 843 // the interference, and the stack interval should never have more than one 844 // use per block. 845 846 // No splits can be inserted after LastSplitPoint, overlap instead. 847 SlotIndex LastSplitPoint = Stop; 848 if (BI.LiveOut) 849 LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber()); 850 851 // At this point, we know that either RegIn or RegOut is set. We dealt with 852 // the all-stack case above. 853 854 // Blocks without interference are relatively easy. 855 if (!Intf.hasInterference()) { 856 DEBUG(dbgs() << ", no interference.\n"); 857 SE->selectIntv(MainIntv); 858 // The easiest case has MainIntv live through. 859 // 860 // |---o---o---| Live-in, live-out. 861 // ============= Use MainIntv everywhere. 862 // 863 SlotIndex From = Start, To = Stop; 864 865 // Block entry. Reload before the first use if MainIntv is not live-in. 866 // 867 // |---o-- Enter on stack. 868 // ____=== Reload before first use. 869 // 870 // | o-- Defined in block. 871 // === Use MainIntv from def. 872 // 873 if (!RegIn) 874 From = SE->enterIntvBefore(BI.FirstUse); 875 876 // Block exit. Handle cases where MainIntv is not live-out. 877 if (!BI.LiveOut) 878 // 879 // --x | Killed in block. 880 // === Use MainIntv up to kill. 881 // 882 To = SE->leaveIntvAfter(BI.LastUse); 883 else if (!RegOut) { 884 // 885 // --o---| Live-out on stack. 886 // ===____ Use MainIntv up to last use, switch to stack. 887 // 888 // -----o| Live-out on stack, last use after last split point. 889 // ====== Extend MainIntv to last use, overlapping. 890 // \____ Copy to stack interval before last split point. 891 // 892 if (BI.LastUse < LastSplitPoint) 893 To = SE->leaveIntvAfter(BI.LastUse); 894 else { 895 // The last use is after the last split point, it is probably an 896 // indirect branch. 
897 To = SE->leaveIntvBefore(LastSplitPoint); 898 // Run a double interval from the split to the last use. This makes 899 // it possible to spill the complement without affecting the indirect 900 // branch. 901 SE->overlapIntv(To, BI.LastUse); 902 } 903 } 904 905 // Paint in MainIntv liveness for this block. 906 SE->useIntv(From, To); 907 continue; 908 } 909 910 // We are now looking at a block with interference, and we know that either 911 // RegIn or RegOut is set. 912 assert(Intf.hasInterference() && (RegIn || RegOut) && "Bad invariant"); 913 914 // If the live range is not live through the block, it is possible that the 915 // interference doesn't even overlap. Deal with those cases first. Since 916 // no copy instructions are required, we can tolerate interference starting 917 // or ending at the same instruction that kills or defines our live range. 918 919 // Live-in, killed before interference. 920 // 921 // ~~~ Interference after kill. 922 // |---o---x | Killed in block. 923 // ========= Use MainIntv everywhere. 924 // 925 if (RegIn && !BI.LiveOut && BI.LastUse <= Intf.first()) { 926 DEBUG(dbgs() << ", live-in, killed before interference.\n"); 927 SE->selectIntv(MainIntv); 928 SlotIndex To = SE->leaveIntvAfter(BI.LastUse); 929 SE->useIntv(Start, To); 930 continue; 931 } 932 933 // Live-out, defined after interference. 934 // 935 // ~~~ Interference before def. 936 // | o---o---| Defined in block. 937 // ========= Use MainIntv everywhere. 938 // 939 if (RegOut && !BI.LiveIn && BI.FirstUse >= Intf.last()) { 940 DEBUG(dbgs() << ", live-out, defined after interference.\n"); 941 SE->selectIntv(MainIntv); 942 SlotIndex From = SE->enterIntvBefore(BI.FirstUse); 943 SE->useIntv(From, Stop); 944 continue; 945 } 946 947 // The interference is now known to overlap the live range, but it may 948 // still be easy to avoid if all the interference is on one side of the 949 // uses, and we enter or leave on the stack. 950 951 // Live-out on stack, interference after last use. 952 // 953 // ~~~ Interference after last use. 954 // |---o---o---| Live-out on stack. 955 // =========____ Leave MainIntv after last use. 956 // 957 // ~ Interference after last use. 958 // |---o---o--o| Live-out on stack, late last use. 959 // ============ Copy to stack after LSP, overlap MainIntv. 960 // \_____ Stack interval is live-out. 961 // 962 if (!RegOut && Intf.first() > BI.LastUse.getBoundaryIndex()) { 963 assert(RegIn && "Stack-in, stack-out should already be handled"); 964 if (BI.LastUse < LastSplitPoint) { 965 DEBUG(dbgs() << ", live-in, stack-out, interference after last use.\n"); 966 SE->selectIntv(MainIntv); 967 SlotIndex To = SE->leaveIntvAfter(BI.LastUse); 968 assert(To <= Intf.first() && "Expected to avoid interference"); 969 SE->useIntv(Start, To); 970 } else { 971 DEBUG(dbgs() << ", live-in, stack-out, avoid last split point\n"); 972 SE->selectIntv(MainIntv); 973 SlotIndex To = SE->leaveIntvBefore(LastSplitPoint); 974 assert(To <= Intf.first() && "Expected to avoid interference"); 975 SE->overlapIntv(To, BI.LastUse); 976 SE->useIntv(Start, To); 977 } 978 continue; 979 } 980 981 // Live-in on stack, interference before first use. 982 // 983 // ~~~ Interference before first use. 984 // |---o---o---| Live-in on stack. 985 // ____========= Enter MainIntv before first use. 
    //
    if (!RegIn && Intf.last() < BI.FirstUse.getBaseIndex()) {
      assert(RegOut && "Stack-in, stack-out should already be handled");
      DEBUG(dbgs() << ", stack-in, interference before first use.\n");
      SE->selectIntv(MainIntv);
      SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
      assert(From >= Intf.last() && "Expected to avoid interference");
      SE->useIntv(From, Stop);
      continue;
    }

    // The interference is overlapping somewhere we wanted to use MainIntv. That
    // means we need to create a local interval that can be allocated a
    // different register.
    unsigned LocalIntv = SE->openIntv();
    DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");

    // We may be creating copies directly between MainIntv and LocalIntv,
    // bypassing the stack interval. When we do that, we should never use the
    // leaveIntv* methods as they define values in the stack interval. By
    // starting from the end of the block and working our way backwards, we can
    // get by with only enterIntv* methods.
    //
    // When selecting split points, we generally try to maximize the stack
    // interval as long as it contains no uses, maximize the main interval as
    // long as it doesn't overlap interference, and minimize the local interval
    // that we don't know how to allocate yet.

    // Handle the block exit, set Pos to the first handled slot.
    SlotIndex Pos = BI.LastUse;
    if (RegOut) {
      assert(Intf.last() < LastSplitPoint && "Cannot be live-out in register");
      // Create a snippet of MainIntv that is live-out.
      //
      //     ~~~        Interference overlapping uses.
      //     --o---|    Live-out in MainIntv.
      //     ----===    Switch from LocalIntv to MainIntv after interference.
      //
      SE->selectIntv(MainIntv);
      Pos = SE->enterIntvAfter(Intf.last());
      assert(Pos >= Intf.last() && "Expected to avoid interference");
      SE->useIntv(Pos, Stop);
      SE->selectIntv(LocalIntv);
    } else if (BI.LiveOut) {
      if (BI.LastUse < LastSplitPoint) {
        // Live-out on the stack.
        //
        //     ~~~        Interference overlapping uses.
        //     --o---|    Live-out on stack.
        //     ---____    Switch from LocalIntv to stack after last use.
        //
        Pos = SE->leaveIntvAfter(BI.LastUse);
      } else {
        // Live-out on the stack, last use after last split point.
        //
        //     ~~~        Interference overlapping uses.
        //     --o--o|    Live-out on stack, late use.
        //     ------     Copy to stack before LSP, overlap LocalIntv.
        //           \__
        //
        Pos = SE->leaveIntvBefore(LastSplitPoint);
        // We need to overlap LocalIntv so it can reach LastUse.
        SE->overlapIntv(Pos, BI.LastUse);
      }
    }

    // When not live-out, leave Pos at LastUse. We have handled everything from
    // Pos to Stop. Find the starting point for LocalIntv.
    assert(SE->currentIntv() == LocalIntv && "Expecting local interval");

    if (RegIn) {
      assert(Start < Intf.first() && "Cannot be live-in with interference");
      // Live-in in MainIntv, only use LocalIntv for interference.
      //
      //     ~~~        Interference overlapping uses.
      //     |---o--    Live-in in MainIntv.
      //     ====---    Switch to LocalIntv before interference.
1063 // 1064 SlotIndex Switch = SE->enterIntvBefore(std::min(Pos, Intf.first())); 1065 assert(Switch <= Intf.first() && "Expected to avoid interference"); 1066 SE->useIntv(Switch, Pos); 1067 SE->selectIntv(MainIntv); 1068 SE->useIntv(Start, Switch); 1069 } else { 1070 // Live-in on stack, enter LocalIntv before first use. 1071 // 1072 // ~~~ Interference overlapping uses. 1073 // |---o-- Live-in in MainIntv. 1074 // ____--- Reload to LocalIntv before interference. 1075 // 1076 // Defined in block. 1077 // 1078 // ~~~ Interference overlapping uses. 1079 // | o-- Defined in block. 1080 // --- Begin LocalIntv at first use. 1081 // 1082 SlotIndex Switch = SE->enterIntvBefore(std::min(Pos, BI.FirstUse)); 1083 SE->useIntv(Switch, Pos); 1084 } 1085 } 1086 1087 // Handle live-through blocks. 1088 SE->selectIntv(MainIntv); 1089 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) { 1090 unsigned Number = Cand.ActiveBlocks[i]; 1091 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; 1092 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; 1093 DEBUG(dbgs() << "Live through BB#" << Number << '\n'); 1094 if (RegIn && RegOut) { 1095 Intf.moveToBlock(Number); 1096 if (!Intf.hasInterference()) { 1097 SE->useIntv(Indexes->getMBBStartIdx(Number), 1098 Indexes->getMBBEndIdx(Number)); 1099 continue; 1100 } 1101 } 1102 MachineBasicBlock *MBB = MF->getBlockNumbered(Number); 1103 if (RegIn) 1104 SE->leaveIntvAtTop(*MBB); 1105 if (RegOut) 1106 SE->enterIntvAtEnd(*MBB); 1107 } 1108 1109 ++NumGlobalSplits; 1110 1111 SmallVector<unsigned, 8> IntvMap; 1112 SE->finish(&IntvMap); 1113 DebugVars->splitRegister(VirtReg.reg, LREdit.regs()); 1114 1115 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1116 unsigned OrigBlocks = SA->getNumLiveBlocks(); 1117 1118 // Sort out the new intervals created by splitting. We get four kinds: 1119 // - Remainder intervals should not be split again. 1120 // - Candidate intervals can be assigned to Cand.PhysReg. 1121 // - Block-local splits are candidates for local splitting. 1122 // - DCE leftovers should go back on the queue. 1123 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { 1124 LiveInterval &Reg = *LREdit.get(i); 1125 1126 // Ignore old intervals from DCE. 1127 if (getStage(Reg) != RS_New) 1128 continue; 1129 1130 // Remainder interval. Don't try splitting again, spill if it doesn't 1131 // allocate. 1132 if (IntvMap[i] == 0) { 1133 setStage(Reg, RS_Global); 1134 continue; 1135 } 1136 1137 // Main interval. Allow repeated splitting as long as the number of live 1138 // blocks is strictly decreasing. 1139 if (IntvMap[i] == MainIntv) { 1140 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) { 1141 DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks 1142 << " blocks as original.\n"); 1143 // Don't allow repeated splitting as a safe guard against looping. 1144 setStage(Reg, RS_Global); 1145 } 1146 continue; 1147 } 1148 1149 // Other intervals are treated as new. This includes local intervals created 1150 // for blocks with multiple uses, and anything created by DCE. 
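    // They keep the RS_New stage, so they get a full allocation attempt,
    // including eviction and further splitting, when they are dequeued.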
1151 } 1152 1153 if (VerifyEnabled) 1154 MF->verify(this, "After splitting live range around region"); 1155} 1156 1157unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1158 SmallVectorImpl<LiveInterval*> &NewVRegs) { 1159 float BestCost = Hysteresis * calcSpillCost(); 1160 DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n'); 1161 const unsigned NoCand = ~0u; 1162 unsigned BestCand = NoCand; 1163 1164 Order.rewind(); 1165 for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) { 1166 if (GlobalCand.size() <= Cand) 1167 GlobalCand.resize(Cand+1); 1168 GlobalCand[Cand].reset(PhysReg); 1169 1170 SpillPlacer->prepare(GlobalCand[Cand].LiveBundles); 1171 float Cost; 1172 InterferenceCache::Cursor Intf(IntfCache, PhysReg); 1173 if (!addSplitConstraints(Intf, Cost)) { 1174 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n"); 1175 continue; 1176 } 1177 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost); 1178 if (Cost >= BestCost) { 1179 DEBUG({ 1180 if (BestCand == NoCand) 1181 dbgs() << " worse than no bundles\n"; 1182 else 1183 dbgs() << " worse than " 1184 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n'; 1185 }); 1186 continue; 1187 } 1188 growRegion(GlobalCand[Cand], Intf); 1189 1190 SpillPlacer->finish(); 1191 1192 // No live bundles, defer to splitSingleBlocks(). 1193 if (!GlobalCand[Cand].LiveBundles.any()) { 1194 DEBUG(dbgs() << " no bundles.\n"); 1195 continue; 1196 } 1197 1198 Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf); 1199 DEBUG({ 1200 dbgs() << ", total = " << Cost << " with bundles"; 1201 for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0; 1202 i = GlobalCand[Cand].LiveBundles.find_next(i)) 1203 dbgs() << " EB#" << i; 1204 dbgs() << ".\n"; 1205 }); 1206 if (Cost < BestCost) { 1207 BestCand = Cand; 1208 BestCost = Hysteresis * Cost; // Prevent rounding effects. 1209 } 1210 } 1211 1212 if (BestCand == NoCand) 1213 return 0; 1214 1215 splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs); 1216 return 0; 1217} 1218 1219 1220//===----------------------------------------------------------------------===// 1221// Local Splitting 1222//===----------------------------------------------------------------------===// 1223 1224 1225/// calcGapWeights - Compute the maximum spill weight that needs to be evicted 1226/// in order to use PhysReg between two entries in SA->UseSlots. 1227/// 1228/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1]. 1229/// 1230void RAGreedy::calcGapWeights(unsigned PhysReg, 1231 SmallVectorImpl<float> &GapWeight) { 1232 assert(SA->getUseBlocks().size() == 1 && "Not a local interval"); 1233 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front(); 1234 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots; 1235 const unsigned NumGaps = Uses.size()-1; 1236 1237 // Start and end points for the interference check. 1238 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse; 1239 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse; 1240 1241 GapWeight.assign(NumGaps, 0.0f); 1242 1243 // Add interference from each overlapping register. 1244 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) { 1245 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI) 1246 .checkInterference()) 1247 continue; 1248 1249 // We know that VirtReg is a continuous interval from FirstUse to LastUse, 1250 // so we don't need InterferenceQuery. 
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Local. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() >= RS_Local. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Local,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Local;

  // Best split candidate.
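  // BestBefore == NumGaps is the sentinel value meaning no usable split was
  // found.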
1326 unsigned BestBefore = NumGaps; 1327 unsigned BestAfter = 0; 1328 float BestDiff = 0; 1329 1330 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber()); 1331 SmallVector<float, 8> GapWeight; 1332 1333 Order.rewind(); 1334 while (unsigned PhysReg = Order.next()) { 1335 // Keep track of the largest spill weight that would need to be evicted in 1336 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1]. 1337 calcGapWeights(PhysReg, GapWeight); 1338 1339 // Try to find the best sequence of gaps to close. 1340 // The new spill weight must be larger than any gap interference. 1341 1342 // We will split before Uses[SplitBefore] and after Uses[SplitAfter]. 1343 unsigned SplitBefore = 0, SplitAfter = 1; 1344 1345 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]). 1346 // It is the spill weight that needs to be evicted. 1347 float MaxGap = GapWeight[0]; 1348 1349 for (;;) { 1350 // Live before/after split? 1351 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn; 1352 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut; 1353 1354 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' ' 1355 << Uses[SplitBefore] << '-' << Uses[SplitAfter] 1356 << " i=" << MaxGap); 1357 1358 // Stop before the interval gets so big we wouldn't be making progress. 1359 if (!LiveBefore && !LiveAfter) { 1360 DEBUG(dbgs() << " all\n"); 1361 break; 1362 } 1363 // Should the interval be extended or shrunk? 1364 bool Shrink = true; 1365 1366 // How many gaps would the new range have? 1367 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter; 1368 1369 // Legally, without causing looping? 1370 bool Legal = !ProgressRequired || NewGaps < NumGaps; 1371 1372 if (Legal && MaxGap < HUGE_VALF) { 1373 // Estimate the new spill weight. Each instruction reads or writes the 1374 // register. Conservatively assume there are no read-modify-write 1375 // instructions. 1376 // 1377 // Try to guess the size of the new interval. 1378 const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1), 1379 Uses[SplitBefore].distance(Uses[SplitAfter]) + 1380 (LiveBefore + LiveAfter)*SlotIndex::InstrDist); 1381 // Would this split be possible to allocate? 1382 // Never allocate all gaps, we wouldn't be making progress. 1383 DEBUG(dbgs() << " w=" << EstWeight); 1384 if (EstWeight * Hysteresis >= MaxGap) { 1385 Shrink = false; 1386 float Diff = EstWeight - MaxGap; 1387 if (Diff > BestDiff) { 1388 DEBUG(dbgs() << " (best)"); 1389 BestDiff = Hysteresis * Diff; 1390 BestBefore = SplitBefore; 1391 BestAfter = SplitAfter; 1392 } 1393 } 1394 } 1395 1396 // Try to shrink. 1397 if (Shrink) { 1398 if (++SplitBefore < SplitAfter) { 1399 DEBUG(dbgs() << " shrink\n"); 1400 // Recompute the max when necessary. 1401 if (GapWeight[SplitBefore - 1] >= MaxGap) { 1402 MaxGap = GapWeight[SplitBefore]; 1403 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i) 1404 MaxGap = std::max(MaxGap, GapWeight[i]); 1405 } 1406 continue; 1407 } 1408 MaxGap = 0; 1409 } 1410 1411 // Try to extend the interval. 1412 if (SplitAfter >= NumGaps) { 1413 DEBUG(dbgs() << " end\n"); 1414 break; 1415 } 1416 1417 DEBUG(dbgs() << " extend\n"); 1418 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]); 1419 } 1420 } 1421 1422 // Didn't find any candidates? 
1423 if (BestBefore == NumGaps) 1424 return 0; 1425 1426 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] 1427 << '-' << Uses[BestAfter] << ", " << BestDiff 1428 << ", " << (BestAfter - BestBefore + 1) << " instrs\n"); 1429 1430 LiveRangeEdit LREdit(VirtReg, NewVRegs, this); 1431 SE->reset(LREdit); 1432 1433 SE->openIntv(); 1434 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]); 1435 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]); 1436 SE->useIntv(SegStart, SegStop); 1437 SmallVector<unsigned, 8> IntvMap; 1438 SE->finish(&IntvMap); 1439 DebugVars->splitRegister(VirtReg.reg, LREdit.regs()); 1440 1441 // If the new range has the same number of instructions as before, mark it as 1442 // RS_Local so the next split will be forced to make progress. Otherwise, 1443 // leave the new intervals as RS_New so they can compete. 1444 bool LiveBefore = BestBefore != 0 || BI.LiveIn; 1445 bool LiveAfter = BestAfter != NumGaps || BI.LiveOut; 1446 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter; 1447 if (NewGaps >= NumGaps) { 1448 DEBUG(dbgs() << "Tagging non-progress ranges: "); 1449 assert(!ProgressRequired && "Didn't make progress when it was required."); 1450 for (unsigned i = 0, e = IntvMap.size(); i != e; ++i) 1451 if (IntvMap[i] == 1) { 1452 setStage(*LREdit.get(i), RS_Local); 1453 DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg)); 1454 } 1455 DEBUG(dbgs() << '\n'); 1456 } 1457 ++NumLocalSplits; 1458 1459 return 0; 1460} 1461 1462//===----------------------------------------------------------------------===// 1463// Live Range Splitting 1464//===----------------------------------------------------------------------===// 1465 1466/// trySplit - Try to split VirtReg or one of its interferences, making it 1467/// assignable. 1468/// @return Physreg when VirtReg may be assigned and/or new NewVRegs. 1469unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order, 1470 SmallVectorImpl<LiveInterval*>&NewVRegs) { 1471 // Local intervals are handled separately. 1472 if (LIS->intervalIsInOneMBB(VirtReg)) { 1473 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled); 1474 SA->analyze(&VirtReg); 1475 return tryLocalSplit(VirtReg, Order, NewVRegs); 1476 } 1477 1478 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled); 1479 1480 // Don't iterate global splitting. 1481 // Move straight to spilling if this range was produced by a global split. 1482 if (getStage(VirtReg) >= RS_Global) 1483 return 0; 1484 1485 SA->analyze(&VirtReg); 1486 1487 // FIXME: SplitAnalysis may repair broken live ranges coming from the 1488 // coalescer. That may cause the range to become allocatable which means that 1489 // tryRegionSplit won't be making progress. This check should be replaced with 1490 // an assertion when the coalescer is fixed. 1491 if (SA->didRepairRange()) { 1492 // VirtReg has changed, so all cached queries are invalid. 1493 invalidateVirtRegs(); 1494 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) 1495 return PhysReg; 1496 } 1497 1498 // First try to split around a region spanning multiple blocks. 1499 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs); 1500 if (PhysReg || !NewVRegs.empty()) 1501 return PhysReg; 1502 1503 // Then isolate blocks with multiple uses. 
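  // Each block with several uses gets its own local interval; the new ranges
  // are marked RS_Global below so region splitting is not attempted on them
  // again.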
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}


//===----------------------------------------------------------------------===//
// Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Second ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Second)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage == RS_First) {
    setStage(VirtReg, RS_Second);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If this range was produced by spilling and still cannot be allocated, there
  // is probably some invalid inline assembly. The base class will report it.
  if (Stage >= RS_Spill)
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
1574 return 0; 1575} 1576 1577bool RAGreedy::runOnMachineFunction(MachineFunction &mf) { 1578 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n" 1579 << "********** Function: " 1580 << ((Value*)mf.getFunction())->getName() << '\n'); 1581 1582 MF = &mf; 1583 if (VerifyEnabled) 1584 MF->verify(this, "Before greedy register allocator"); 1585 1586 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>()); 1587 Indexes = &getAnalysis<SlotIndexes>(); 1588 DomTree = &getAnalysis<MachineDominatorTree>(); 1589 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM)); 1590 Loops = &getAnalysis<MachineLoopInfo>(); 1591 LoopRanges = &getAnalysis<MachineLoopRanges>(); 1592 Bundles = &getAnalysis<EdgeBundles>(); 1593 SpillPlacer = &getAnalysis<SpillPlacement>(); 1594 DebugVars = &getAnalysis<LiveDebugVariables>(); 1595 1596 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops)); 1597 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree)); 1598 ExtraRegInfo.clear(); 1599 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1600 NextCascade = 1; 1601 IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI); 1602 1603 allocatePhysRegs(); 1604 addMBBLiveIns(MF); 1605 LIS->addKillFlags(); 1606 1607 // Run rewriter 1608 { 1609 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled); 1610 VRM->rewrite(Indexes); 1611 } 1612 1613 // Write out new DBG_VALUE instructions. 1614 DebugVars->emitDebugValues(VRM); 1615 1616 // The pass output is in VirtRegMap. Release all the transient data. 1617 releaseMemory(); 1618 1619 return true; 1620} 1621