InlineSpiller.cpp revision cb39064e7aee2273da1d00e6b800db84ddc34b6b
1//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// The inline spiller modifies the machine function directly instead of 11// inserting spills and restores in VirtRegMap. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "regalloc" 16#include "Spiller.h" 17#include "LiveRangeEdit.h" 18#include "VirtRegMap.h" 19#include "llvm/ADT/Statistic.h" 20#include "llvm/ADT/TinyPtrVector.h" 21#include "llvm/Analysis/AliasAnalysis.h" 22#include "llvm/CodeGen/LiveIntervalAnalysis.h" 23#include "llvm/CodeGen/LiveStackAnalysis.h" 24#include "llvm/CodeGen/MachineDominators.h" 25#include "llvm/CodeGen/MachineFrameInfo.h" 26#include "llvm/CodeGen/MachineFunction.h" 27#include "llvm/CodeGen/MachineLoopInfo.h" 28#include "llvm/CodeGen/MachineRegisterInfo.h" 29#include "llvm/Target/TargetMachine.h" 30#include "llvm/Target/TargetInstrInfo.h" 31#include "llvm/Support/CommandLine.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/Support/raw_ostream.h" 34 35using namespace llvm; 36 37STATISTIC(NumSpilledRanges, "Number of spilled live ranges"); 38STATISTIC(NumSnippets, "Number of spilled snippets"); 39STATISTIC(NumSpills, "Number of spills inserted"); 40STATISTIC(NumSpillsRemoved, "Number of spills removed"); 41STATISTIC(NumReloads, "Number of reloads inserted"); 42STATISTIC(NumReloadsRemoved, "Number of reloads removed"); 43STATISTIC(NumFolded, "Number of folded stack accesses"); 44STATISTIC(NumFoldedLoads, "Number of folded loads"); 45STATISTIC(NumRemats, "Number of rematerialized defs for spilling"); 46STATISTIC(NumOmitReloadSpill, "Number of omitted spills of reloads"); 47STATISTIC(NumHoists, "Number of hoisted spills"); 48 49static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden, 50 cl::desc("Disable inline spill hoisting")); 51 52namespace { 53class InlineSpiller : public Spiller { 54 MachineFunctionPass &Pass; 55 MachineFunction &MF; 56 LiveIntervals &LIS; 57 LiveStacks &LSS; 58 AliasAnalysis *AA; 59 MachineDominatorTree &MDT; 60 MachineLoopInfo &Loops; 61 VirtRegMap &VRM; 62 MachineFrameInfo &MFI; 63 MachineRegisterInfo &MRI; 64 const TargetInstrInfo &TII; 65 const TargetRegisterInfo &TRI; 66 67 // Variables that are valid during spill(), but used by multiple methods. 68 LiveRangeEdit *Edit; 69 LiveInterval *StackInt; 70 int StackSlot; 71 unsigned Original; 72 73 // All registers to spill to StackSlot, including the main register. 74 SmallVector<unsigned, 8> RegsToSpill; 75 76 // All COPY instructions to/from snippets. 77 // They are ignored since both operands refer to the same stack slot. 78 SmallPtrSet<MachineInstr*, 8> SnippetCopies; 79 80 // Values that failed to remat at some point. 81 SmallPtrSet<VNInfo*, 8> UsedValues; 82 83public: 84 // Information about a value that was defined by a copy from a sibling 85 // register. 86 struct SibValueInfo { 87 // True when all reaching defs were reloads: No spill is necessary. 88 bool AllDefsAreReloads; 89 90 // True when value is defined by an original PHI not from splitting. 91 bool DefByOrigPHI; 92 93 // True when the COPY defining this value killed its source. 94 bool KillsSource; 95 96 // The preferred register to spill. 97 unsigned SpillReg; 98 99 // The value of SpillReg that should be spilled. 100 VNInfo *SpillVNI; 101 102 // The block where SpillVNI should be spilled. Currently, this must be the 103 // block containing SpillVNI->def. 104 MachineBasicBlock *SpillMBB; 105 106 // A defining instruction that is not a sibling copy or a reload, or NULL. 107 // This can be used as a template for rematerialization. 108 MachineInstr *DefMI; 109 110 // List of values that depend on this one. These values are actually the 111 // same, but live range splitting has placed them in different registers, 112 // or SSA update needed to insert PHI-defs to preserve SSA form. This is 113 // copies of the current value and phi-kills. Usually only phi-kills cause 114 // more than one dependent value. 115 TinyPtrVector<VNInfo*> Deps; 116 117 SibValueInfo(unsigned Reg, VNInfo *VNI) 118 : AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false), 119 SpillReg(Reg), SpillVNI(VNI), SpillMBB(0), DefMI(0) {} 120 121 // Returns true when a def has been found. 122 bool hasDef() const { return DefByOrigPHI || DefMI; } 123 }; 124 125private: 126 // Values in RegsToSpill defined by sibling copies. 127 typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap; 128 SibValueMap SibValues; 129 130 // Dead defs generated during spilling. 131 SmallVector<MachineInstr*, 8> DeadDefs; 132 133 ~InlineSpiller() {} 134 135public: 136 InlineSpiller(MachineFunctionPass &pass, 137 MachineFunction &mf, 138 VirtRegMap &vrm) 139 : Pass(pass), 140 MF(mf), 141 LIS(pass.getAnalysis<LiveIntervals>()), 142 LSS(pass.getAnalysis<LiveStacks>()), 143 AA(&pass.getAnalysis<AliasAnalysis>()), 144 MDT(pass.getAnalysis<MachineDominatorTree>()), 145 Loops(pass.getAnalysis<MachineLoopInfo>()), 146 VRM(vrm), 147 MFI(*mf.getFrameInfo()), 148 MRI(mf.getRegInfo()), 149 TII(*mf.getTarget().getInstrInfo()), 150 TRI(*mf.getTarget().getRegisterInfo()) {} 151 152 void spill(LiveRangeEdit &); 153 154private: 155 bool isSnippet(const LiveInterval &SnipLI); 156 void collectRegsToSpill(); 157 158 bool isRegToSpill(unsigned Reg) { 159 return std::find(RegsToSpill.begin(), 160 RegsToSpill.end(), Reg) != RegsToSpill.end(); 161 } 162 163 bool isSibling(unsigned Reg); 164 MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*); 165 void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = 0); 166 void analyzeSiblingValues(); 167 168 bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI); 169 void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI); 170 171 void markValueUsed(LiveInterval*, VNInfo*); 172 bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI); 173 void reMaterializeAll(); 174 175 bool coalesceStackAccess(MachineInstr *MI, unsigned Reg); 176 bool foldMemoryOperand(MachineBasicBlock::iterator MI, 177 const SmallVectorImpl<unsigned> &Ops, 178 MachineInstr *LoadMI = 0); 179 void insertReload(LiveInterval &NewLI, SlotIndex, 180 MachineBasicBlock::iterator MI); 181 void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI, 182 SlotIndex, MachineBasicBlock::iterator MI); 183 184 void spillAroundUses(unsigned Reg); 185 void spillAll(); 186}; 187} 188 189namespace llvm { 190Spiller *createInlineSpiller(MachineFunctionPass &pass, 191 MachineFunction &mf, 192 VirtRegMap &vrm) { 193 return new InlineSpiller(pass, mf, vrm); 194} 195} 196 197//===----------------------------------------------------------------------===// 198// Snippets 199//===----------------------------------------------------------------------===// 200 201// When spilling a virtual register, we also spill any snippets it is connected 202// to. The snippets are small live ranges that only have a single real use, 203// leftovers from live range splitting. Spilling them enables memory operand 204// folding or tightens the live range around the single use. 205// 206// This minimizes register pressure and maximizes the store-to-load distance for 207// spill slots which can be important in tight loops. 208 209/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register, 210/// otherwise return 0. 211static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) { 212 if (!MI->isFullCopy()) 213 return 0; 214 if (MI->getOperand(0).getReg() == Reg) 215 return MI->getOperand(1).getReg(); 216 if (MI->getOperand(1).getReg() == Reg) 217 return MI->getOperand(0).getReg(); 218 return 0; 219} 220 221/// isSnippet - Identify if a live interval is a snippet that should be spilled. 222/// It is assumed that SnipLI is a virtual register with the same original as 223/// Edit->getReg(). 224bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) { 225 unsigned Reg = Edit->getReg(); 226 227 // A snippet is a tiny live range with only a single instruction using it 228 // besides copies to/from Reg or spills/fills. We accept: 229 // 230 // %snip = COPY %Reg / FILL fi# 231 // %snip = USE %snip 232 // %Reg = COPY %snip / SPILL %snip, fi# 233 // 234 if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI)) 235 return false; 236 237 MachineInstr *UseMI = 0; 238 239 // Check that all uses satisfy our criteria. 240 for (MachineRegisterInfo::reg_nodbg_iterator 241 RI = MRI.reg_nodbg_begin(SnipLI.reg); 242 MachineInstr *MI = RI.skipInstruction();) { 243 244 // Allow copies to/from Reg. 245 if (isFullCopyOf(MI, Reg)) 246 continue; 247 248 // Allow stack slot loads. 249 int FI; 250 if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) 251 continue; 252 253 // Allow stack slot stores. 254 if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) 255 continue; 256 257 // Allow a single additional instruction. 258 if (UseMI && MI != UseMI) 259 return false; 260 UseMI = MI; 261 } 262 return true; 263} 264 265/// collectRegsToSpill - Collect live range snippets that only have a single 266/// real use. 267void InlineSpiller::collectRegsToSpill() { 268 unsigned Reg = Edit->getReg(); 269 270 // Main register always spills. 271 RegsToSpill.assign(1, Reg); 272 SnippetCopies.clear(); 273 274 // Snippets all have the same original, so there can't be any for an original 275 // register. 276 if (Original == Reg) 277 return; 278 279 for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg); 280 MachineInstr *MI = RI.skipInstruction();) { 281 unsigned SnipReg = isFullCopyOf(MI, Reg); 282 if (!isSibling(SnipReg)) 283 continue; 284 LiveInterval &SnipLI = LIS.getInterval(SnipReg); 285 if (!isSnippet(SnipLI)) 286 continue; 287 SnippetCopies.insert(MI); 288 if (isRegToSpill(SnipReg)) 289 continue; 290 RegsToSpill.push_back(SnipReg); 291 DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n'); 292 ++NumSnippets; 293 } 294} 295 296 297//===----------------------------------------------------------------------===// 298// Sibling Values 299//===----------------------------------------------------------------------===// 300 301// After live range splitting, some values to be spilled may be defined by 302// copies from sibling registers. We trace the sibling copies back to the 303// original value if it still exists. We need it for rematerialization. 304// 305// Even when the value can't be rematerialized, we still want to determine if 306// the value has already been spilled, or we may want to hoist the spill from a 307// loop. 308 309bool InlineSpiller::isSibling(unsigned Reg) { 310 return TargetRegisterInfo::isVirtualRegister(Reg) && 311 VRM.getOriginal(Reg) == Original; 312} 313 314#ifndef NDEBUG 315static raw_ostream &operator<<(raw_ostream &OS, 316 const InlineSpiller::SibValueInfo &SVI) { 317 OS << "spill " << PrintReg(SVI.SpillReg) << ':' 318 << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def; 319 if (SVI.SpillMBB) 320 OS << " in BB#" << SVI.SpillMBB->getNumber(); 321 if (SVI.AllDefsAreReloads) 322 OS << " all-reloads"; 323 if (SVI.DefByOrigPHI) 324 OS << " orig-phi"; 325 if (SVI.KillsSource) 326 OS << " kill"; 327 OS << " deps["; 328 for (unsigned i = 0, e = SVI.Deps.size(); i != e; ++i) 329 OS << ' ' << SVI.Deps[i]->id << '@' << SVI.Deps[i]->def; 330 OS << " ]"; 331 if (SVI.DefMI) 332 OS << " def: " << *SVI.DefMI; 333 else 334 OS << '\n'; 335 return OS; 336} 337#endif 338 339/// propagateSiblingValue - Propagate the value in SVI to dependents if it is 340/// known. Otherwise remember the dependency for later. 341/// 342/// @param SVI SibValues entry to propagate. 343/// @param VNI Dependent value, or NULL to propagate to all saved dependents. 344void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVI, 345 VNInfo *VNI) { 346 // When VNI is non-NULL, add it to SVI's deps, and only propagate to that. 347 TinyPtrVector<VNInfo*> FirstDeps; 348 if (VNI) { 349 FirstDeps.push_back(VNI); 350 SVI->second.Deps.push_back(VNI); 351 } 352 353 // Has the value been completely determined yet? If not, defer propagation. 354 if (!SVI->second.hasDef()) 355 return; 356 357 // Work list of values to propagate. It would be nice to use a SetVector 358 // here, but then we would be forced to use a SmallSet. 359 SmallVector<SibValueMap::iterator, 8> WorkList(1, SVI); 360 SmallPtrSet<VNInfo*, 8> WorkSet; 361 362 do { 363 SVI = WorkList.pop_back_val(); 364 WorkSet.erase(SVI->first); 365 TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps; 366 VNI = 0; 367 368 SibValueInfo &SV = SVI->second; 369 if (!SV.SpillMBB) 370 SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def); 371 372 DEBUG(dbgs() << " prop to " << Deps->size() << ": " 373 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV); 374 375 assert(SV.hasDef() && "Propagating undefined value"); 376 377 // Should this value be propagated as a preferred spill candidate? We don't 378 // propagate values of registers that are about to spill. 379 bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg); 380 unsigned SpillDepth = ~0u; 381 382 for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(), 383 DepE = Deps->end(); DepI != DepE; ++DepI) { 384 SibValueMap::iterator DepSVI = SibValues.find(*DepI); 385 assert(DepSVI != SibValues.end() && "Dependent value not in SibValues"); 386 SibValueInfo &DepSV = DepSVI->second; 387 if (!DepSV.SpillMBB) 388 DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def); 389 390 bool Changed = false; 391 392 // Propagate defining instruction. 393 if (!DepSV.hasDef()) { 394 Changed = true; 395 DepSV.DefMI = SV.DefMI; 396 DepSV.DefByOrigPHI = SV.DefByOrigPHI; 397 } 398 399 // Propagate AllDefsAreReloads. For PHI values, this computes an AND of 400 // all predecessors. 401 if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) { 402 Changed = true; 403 DepSV.AllDefsAreReloads = false; 404 } 405 406 // Propagate best spill value. 407 if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) { 408 if (SV.SpillMBB == DepSV.SpillMBB) { 409 // DepSV is in the same block. Hoist when dominated. 410 if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) { 411 // This is an alternative def earlier in the same MBB. 412 // Hoist the spill as far as possible in SpillMBB. This can ease 413 // register pressure: 414 // 415 // x = def 416 // y = use x 417 // s = copy x 418 // 419 // Hoisting the spill of s to immediately after the def removes the 420 // interference between x and y: 421 // 422 // x = def 423 // spill x 424 // y = use x<kill> 425 // 426 // This hoist only helps when the DepSV copy kills its source. 427 Changed = true; 428 DepSV.SpillReg = SV.SpillReg; 429 DepSV.SpillVNI = SV.SpillVNI; 430 DepSV.SpillMBB = SV.SpillMBB; 431 } 432 } else { 433 // DepSV is in a different block. 434 if (SpillDepth == ~0u) 435 SpillDepth = Loops.getLoopDepth(SV.SpillMBB); 436 437 // Also hoist spills to blocks with smaller loop depth, but make sure 438 // that the new value dominates. Non-phi dependents are always 439 // dominated, phis need checking. 440 if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) && 441 (!DepSVI->first->isPHIDef() || 442 MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) { 443 Changed = true; 444 DepSV.SpillReg = SV.SpillReg; 445 DepSV.SpillVNI = SV.SpillVNI; 446 DepSV.SpillMBB = SV.SpillMBB; 447 } 448 } 449 } 450 451 if (!Changed) 452 continue; 453 454 // Something changed in DepSVI. Propagate to dependents. 455 if (WorkSet.insert(DepSVI->first)) 456 WorkList.push_back(DepSVI); 457 458 DEBUG(dbgs() << " update " << DepSVI->first->id << '@' 459 << DepSVI->first->def << " to:\t" << DepSV); 460 } 461 } while (!WorkList.empty()); 462} 463 464/// traceSiblingValue - Trace a value that is about to be spilled back to the 465/// real defining instructions by looking through sibling copies. Always stay 466/// within the range of OrigVNI so the registers are known to carry the same 467/// value. 468/// 469/// Determine if the value is defined by all reloads, so spilling isn't 470/// necessary - the value is already in the stack slot. 471/// 472/// Return a defining instruction that may be a candidate for rematerialization. 473/// 474MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI, 475 VNInfo *OrigVNI) { 476 // Check if a cached value already exists. 477 SibValueMap::iterator SVI; 478 bool Inserted; 479 tie(SVI, Inserted) = 480 SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI))); 481 if (!Inserted) { 482 DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':' 483 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second); 484 return SVI->second.DefMI; 485 } 486 487 DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':' 488 << UseVNI->id << '@' << UseVNI->def << '\n'); 489 490 // List of (Reg, VNI) that have been inserted into SibValues, but need to be 491 // processed. 492 SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList; 493 WorkList.push_back(std::make_pair(UseReg, UseVNI)); 494 495 do { 496 unsigned Reg; 497 VNInfo *VNI; 498 tie(Reg, VNI) = WorkList.pop_back_val(); 499 DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def 500 << ":\t"); 501 502 // First check if this value has already been computed. 503 SVI = SibValues.find(VNI); 504 assert(SVI != SibValues.end() && "Missing SibValues entry"); 505 506 // Trace through PHI-defs created by live range splitting. 507 if (VNI->isPHIDef()) { 508 // Stop at original PHIs. We don't know the value at the predecessors. 509 if (VNI->def == OrigVNI->def) { 510 DEBUG(dbgs() << "orig phi value\n"); 511 SVI->second.DefByOrigPHI = true; 512 SVI->second.AllDefsAreReloads = false; 513 propagateSiblingValue(SVI); 514 continue; 515 } 516 517 // This is a PHI inserted by live range splitting. We could trace the 518 // live-out value from predecessor blocks, but that search can be very 519 // expensive if there are many predecessors and many more PHIs as 520 // generated by tail-dup when it sees an indirectbr. Instead, look at 521 // all the non-PHI defs that have the same value as OrigVNI. They must 522 // jointly dominate VNI->def. This is not optimal since VNI may actually 523 // be jointly dominated by a smaller subset of defs, so there is a change 524 // we will miss a AllDefsAreReloads optimization. 525 526 // Separate all values dominated by OrigVNI into PHIs and non-PHIs. 527 SmallVector<VNInfo*, 8> PHIs, NonPHIs; 528 LiveInterval &LI = LIS.getInterval(Reg); 529 LiveInterval &OrigLI = LIS.getInterval(Original); 530 531 for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end(); 532 VI != VE; ++VI) { 533 VNInfo *VNI2 = *VI; 534 if (VNI2->isUnused()) 535 continue; 536 if (!OrigLI.containsOneValue() && 537 OrigLI.getVNInfoAt(VNI2->def) != OrigVNI) 538 continue; 539 if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def) 540 PHIs.push_back(VNI2); 541 else 542 NonPHIs.push_back(VNI2); 543 } 544 DEBUG(dbgs() << "split phi value, checking " << PHIs.size() 545 << " phi-defs, and " << NonPHIs.size() 546 << " non-phi/orig defs\n"); 547 548 // Create entries for all the PHIs. Don't add them to the worklist, we 549 // are processing all of them in one go here. 550 for (unsigned i = 0, e = PHIs.size(); i != e; ++i) 551 SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i]))); 552 553 // Add every PHI as a dependent of all the non-PHIs. 554 for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) { 555 VNInfo *NonPHI = NonPHIs[i]; 556 // Known value? Try an insertion. 557 tie(SVI, Inserted) = 558 SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI))); 559 // Add all the PHIs as dependents of NonPHI. 560 for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi) 561 SVI->second.Deps.push_back(PHIs[pi]); 562 // This is the first time we see NonPHI, add it to the worklist. 563 if (Inserted) 564 WorkList.push_back(std::make_pair(Reg, NonPHI)); 565 else 566 // Propagate to all inserted PHIs, not just VNI. 567 propagateSiblingValue(SVI); 568 } 569 570 // Next work list item. 571 continue; 572 } 573 574 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 575 assert(MI && "Missing def"); 576 577 // Trace through sibling copies. 578 if (unsigned SrcReg = isFullCopyOf(MI, Reg)) { 579 if (isSibling(SrcReg)) { 580 LiveInterval &SrcLI = LIS.getInterval(SrcReg); 581 LiveRange *SrcLR = SrcLI.getLiveRangeContaining(VNI->def.getUseIndex()); 582 assert(SrcLR && "Copy from non-existing value"); 583 // Check if this COPY kills its source. 584 SVI->second.KillsSource = (SrcLR->end == VNI->def); 585 VNInfo *SrcVNI = SrcLR->valno; 586 DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':' 587 << SrcVNI->id << '@' << SrcVNI->def 588 << " kill=" << unsigned(SVI->second.KillsSource) << '\n'); 589 // Known sibling source value? Try an insertion. 590 tie(SVI, Inserted) = SibValues.insert(std::make_pair(SrcVNI, 591 SibValueInfo(SrcReg, SrcVNI))); 592 // This is the first time we see Src, add it to the worklist. 593 if (Inserted) 594 WorkList.push_back(std::make_pair(SrcReg, SrcVNI)); 595 propagateSiblingValue(SVI, VNI); 596 // Next work list item. 597 continue; 598 } 599 } 600 601 // Track reachable reloads. 602 SVI->second.DefMI = MI; 603 SVI->second.SpillMBB = MI->getParent(); 604 int FI; 605 if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) { 606 DEBUG(dbgs() << "reload\n"); 607 propagateSiblingValue(SVI); 608 // Next work list item. 609 continue; 610 } 611 612 // Potential remat candidate. 613 DEBUG(dbgs() << "def " << *MI); 614 SVI->second.AllDefsAreReloads = false; 615 propagateSiblingValue(SVI); 616 } while (!WorkList.empty()); 617 618 // Look up the value we were looking for. We already did this lokup at the 619 // top of the function, but SibValues may have been invalidated. 620 SVI = SibValues.find(UseVNI); 621 assert(SVI != SibValues.end() && "Didn't compute requested info"); 622 DEBUG(dbgs() << " traced to:\t" << SVI->second); 623 return SVI->second.DefMI; 624} 625 626/// analyzeSiblingValues - Trace values defined by sibling copies back to 627/// something that isn't a sibling copy. 628/// 629/// Keep track of values that may be rematerializable. 630void InlineSpiller::analyzeSiblingValues() { 631 SibValues.clear(); 632 633 // No siblings at all? 634 if (Edit->getReg() == Original) 635 return; 636 637 LiveInterval &OrigLI = LIS.getInterval(Original); 638 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { 639 unsigned Reg = RegsToSpill[i]; 640 LiveInterval &LI = LIS.getInterval(Reg); 641 for (LiveInterval::const_vni_iterator VI = LI.vni_begin(), 642 VE = LI.vni_end(); VI != VE; ++VI) { 643 VNInfo *VNI = *VI; 644 if (VNI->isUnused()) 645 continue; 646 MachineInstr *DefMI = 0; 647 // Check possible sibling copies. 648 if (VNI->isPHIDef() || VNI->getCopy()) { 649 VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def); 650 assert(OrigVNI && "Def outside original live range"); 651 if (OrigVNI->def != VNI->def) 652 DefMI = traceSiblingValue(Reg, VNI, OrigVNI); 653 } 654 if (!DefMI && !VNI->isPHIDef()) 655 DefMI = LIS.getInstructionFromIndex(VNI->def); 656 if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) { 657 DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@' 658 << VNI->def << " may remat from " << *DefMI); 659 } 660 } 661 } 662} 663 664/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert 665/// a spill at a better location. 666bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) { 667 SlotIndex Idx = LIS.getInstructionIndex(CopyMI); 668 VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex()); 669 assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy"); 670 SibValueMap::iterator I = SibValues.find(VNI); 671 if (I == SibValues.end()) 672 return false; 673 674 const SibValueInfo &SVI = I->second; 675 676 // Let the normal folding code deal with the boring case. 677 if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI) 678 return false; 679 680 // SpillReg may have been deleted by remat and DCE. 681 if (!LIS.hasInterval(SVI.SpillReg)) { 682 DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n'); 683 SibValues.erase(I); 684 return false; 685 } 686 687 LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg); 688 if (!SibLI.containsValue(SVI.SpillVNI)) { 689 DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n'); 690 SibValues.erase(I); 691 return false; 692 } 693 694 // Conservatively extend the stack slot range to the range of the original 695 // value. We may be able to do better with stack slot coloring by being more 696 // careful here. 697 assert(StackInt && "No stack slot assigned yet."); 698 LiveInterval &OrigLI = LIS.getInterval(Original); 699 VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx); 700 StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0)); 701 DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": " 702 << *StackInt << '\n'); 703 704 // Already spilled everywhere. 705 if (SVI.AllDefsAreReloads) { 706 DEBUG(dbgs() << "\tno spill needed: " << SVI); 707 ++NumOmitReloadSpill; 708 return true; 709 } 710 // We are going to spill SVI.SpillVNI immediately after its def, so clear out 711 // any later spills of the same value. 712 eliminateRedundantSpills(SibLI, SVI.SpillVNI); 713 714 MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def); 715 MachineBasicBlock::iterator MII; 716 if (SVI.SpillVNI->isPHIDef()) 717 MII = MBB->SkipPHIsAndLabels(MBB->begin()); 718 else { 719 MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def); 720 assert(DefMI && "Defining instruction disappeared"); 721 MII = DefMI; 722 ++MII; 723 } 724 // Insert spill without kill flag immediately after def. 725 TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot, 726 MRI.getRegClass(SVI.SpillReg), &TRI); 727 --MII; // Point to store instruction. 728 LIS.InsertMachineInstrInMaps(MII); 729 DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII); 730 731 ++NumSpills; 732 ++NumHoists; 733 return true; 734} 735 736/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any 737/// redundant spills of this value in SLI.reg and sibling copies. 738void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) { 739 assert(VNI && "Missing value"); 740 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 741 WorkList.push_back(std::make_pair(&SLI, VNI)); 742 assert(StackInt && "No stack slot assigned yet."); 743 744 do { 745 LiveInterval *LI; 746 tie(LI, VNI) = WorkList.pop_back_val(); 747 unsigned Reg = LI->reg; 748 DEBUG(dbgs() << "Checking redundant spills for " 749 << VNI->id << '@' << VNI->def << " in " << *LI << '\n'); 750 751 // Regs to spill are taken care of. 752 if (isRegToSpill(Reg)) 753 continue; 754 755 // Add all of VNI's live range to StackInt. 756 StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0)); 757 DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n'); 758 759 // Find all spills and copies of VNI. 760 for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg); 761 MachineInstr *MI = UI.skipInstruction();) { 762 if (!MI->isCopy() && !MI->getDesc().mayStore()) 763 continue; 764 SlotIndex Idx = LIS.getInstructionIndex(MI); 765 if (LI->getVNInfoAt(Idx) != VNI) 766 continue; 767 768 // Follow sibling copies down the dominator tree. 769 if (unsigned DstReg = isFullCopyOf(MI, Reg)) { 770 if (isSibling(DstReg)) { 771 LiveInterval &DstLI = LIS.getInterval(DstReg); 772 VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex()); 773 assert(DstVNI && "Missing defined value"); 774 assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot"); 775 WorkList.push_back(std::make_pair(&DstLI, DstVNI)); 776 } 777 continue; 778 } 779 780 // Erase spills. 781 int FI; 782 if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) { 783 DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI); 784 // eliminateDeadDefs won't normally remove stores, so switch opcode. 785 MI->setDesc(TII.get(TargetOpcode::KILL)); 786 DeadDefs.push_back(MI); 787 ++NumSpillsRemoved; 788 --NumSpills; 789 } 790 } 791 } while (!WorkList.empty()); 792} 793 794 795//===----------------------------------------------------------------------===// 796// Rematerialization 797//===----------------------------------------------------------------------===// 798 799/// markValueUsed - Remember that VNI failed to rematerialize, so its defining 800/// instruction cannot be eliminated. See through snippet copies 801void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { 802 SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList; 803 WorkList.push_back(std::make_pair(LI, VNI)); 804 do { 805 tie(LI, VNI) = WorkList.pop_back_val(); 806 if (!UsedValues.insert(VNI)) 807 continue; 808 809 if (VNI->isPHIDef()) { 810 MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def); 811 for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(), 812 PE = MBB->pred_end(); PI != PE; ++PI) { 813 VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot()); 814 if (PVNI) 815 WorkList.push_back(std::make_pair(LI, PVNI)); 816 } 817 continue; 818 } 819 820 // Follow snippet copies. 821 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 822 if (!SnippetCopies.count(MI)) 823 continue; 824 LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg()); 825 assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy"); 826 VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex()); 827 assert(SnipVNI && "Snippet undefined before copy"); 828 WorkList.push_back(std::make_pair(&SnipLI, SnipVNI)); 829 } while (!WorkList.empty()); 830} 831 832/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading. 833bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, 834 MachineBasicBlock::iterator MI) { 835 SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex(); 836 VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); 837 838 if (!ParentVNI) { 839 DEBUG(dbgs() << "\tadding <undef> flags: "); 840 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 841 MachineOperand &MO = MI->getOperand(i); 842 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) 843 MO.setIsUndef(); 844 } 845 DEBUG(dbgs() << UseIdx << '\t' << *MI); 846 return true; 847 } 848 849 if (SnippetCopies.count(MI)) 850 return false; 851 852 // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy. 853 LiveRangeEdit::Remat RM(ParentVNI); 854 SibValueMap::const_iterator SibI = SibValues.find(ParentVNI); 855 if (SibI != SibValues.end()) 856 RM.OrigMI = SibI->second.DefMI; 857 if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) { 858 markValueUsed(&VirtReg, ParentVNI); 859 DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI); 860 return false; 861 } 862 863 // If the instruction also writes VirtReg.reg, it had better not require the 864 // same register for uses and defs. 865 bool Reads, Writes; 866 SmallVector<unsigned, 8> Ops; 867 tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops); 868 if (Writes) { 869 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 870 MachineOperand &MO = MI->getOperand(Ops[i]); 871 if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) { 872 markValueUsed(&VirtReg, ParentVNI); 873 DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI); 874 return false; 875 } 876 } 877 } 878 879 // Before rematerializing into a register for a single instruction, try to 880 // fold a load into the instruction. That avoids allocating a new register. 881 if (RM.OrigMI->getDesc().canFoldAsLoad() && 882 foldMemoryOperand(MI, Ops, RM.OrigMI)) { 883 Edit->markRematerialized(RM.ParentVNI); 884 ++NumFoldedLoads; 885 return true; 886 } 887 888 // Alocate a new register for the remat. 889 LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM); 890 NewLI.markNotSpillable(); 891 892 // Finally we can rematerialize OrigMI before MI. 893 SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM, 894 LIS, TII, TRI); 895 DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' 896 << *LIS.getInstructionFromIndex(DefIdx)); 897 898 // Replace operands 899 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 900 MachineOperand &MO = MI->getOperand(Ops[i]); 901 if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) { 902 MO.setReg(NewLI.reg); 903 MO.setIsKill(); 904 } 905 } 906 DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI); 907 908 VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator()); 909 NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI)); 910 DEBUG(dbgs() << "\tinterval: " << NewLI << '\n'); 911 ++NumRemats; 912 return true; 913} 914 915/// reMaterializeAll - Try to rematerialize as many uses as possible, 916/// and trim the live ranges after. 917void InlineSpiller::reMaterializeAll() { 918 // analyzeSiblingValues has already tested all relevant defining instructions. 919 if (!Edit->anyRematerializable(LIS, TII, AA)) 920 return; 921 922 UsedValues.clear(); 923 924 // Try to remat before all uses of snippets. 925 bool anyRemat = false; 926 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { 927 unsigned Reg = RegsToSpill[i]; 928 LiveInterval &LI = LIS.getInterval(Reg); 929 for (MachineRegisterInfo::use_nodbg_iterator 930 RI = MRI.use_nodbg_begin(Reg); 931 MachineInstr *MI = RI.skipInstruction();) 932 anyRemat |= reMaterializeFor(LI, MI); 933 } 934 if (!anyRemat) 935 return; 936 937 // Remove any values that were completely rematted. 938 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { 939 unsigned Reg = RegsToSpill[i]; 940 LiveInterval &LI = LIS.getInterval(Reg); 941 for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end(); 942 I != E; ++I) { 943 VNInfo *VNI = *I; 944 if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI)) 945 continue; 946 MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); 947 MI->addRegisterDead(Reg, &TRI); 948 if (!MI->allDefsAreDead()) 949 continue; 950 DEBUG(dbgs() << "All defs dead: " << *MI); 951 DeadDefs.push_back(MI); 952 } 953 } 954 955 // Eliminate dead code after remat. Note that some snippet copies may be 956 // deleted here. 957 if (DeadDefs.empty()) 958 return; 959 DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n"); 960 Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII); 961 962 // Get rid of deleted and empty intervals. 963 for (unsigned i = RegsToSpill.size(); i != 0; --i) { 964 unsigned Reg = RegsToSpill[i-1]; 965 if (!LIS.hasInterval(Reg)) { 966 RegsToSpill.erase(RegsToSpill.begin() + (i - 1)); 967 continue; 968 } 969 LiveInterval &LI = LIS.getInterval(Reg); 970 if (!LI.empty()) 971 continue; 972 Edit->eraseVirtReg(Reg, LIS); 973 RegsToSpill.erase(RegsToSpill.begin() + (i - 1)); 974 } 975 DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n"); 976} 977 978 979//===----------------------------------------------------------------------===// 980// Spilling 981//===----------------------------------------------------------------------===// 982 983/// If MI is a load or store of StackSlot, it can be removed. 984bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) { 985 int FI = 0; 986 unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI); 987 bool IsLoad = InstrReg; 988 if (!IsLoad) 989 InstrReg = TII.isStoreToStackSlot(MI, FI); 990 991 // We have a stack access. Is it the right register and slot? 992 if (InstrReg != Reg || FI != StackSlot) 993 return false; 994 995 DEBUG(dbgs() << "Coalescing stack access: " << *MI); 996 LIS.RemoveMachineInstrFromMaps(MI); 997 MI->eraseFromParent(); 998 999 if (IsLoad) { 1000 ++NumReloadsRemoved; 1001 --NumReloads; 1002 } else { 1003 ++NumSpillsRemoved; 1004 --NumSpills; 1005 } 1006 1007 return true; 1008} 1009 1010/// foldMemoryOperand - Try folding stack slot references in Ops into MI. 1011/// @param MI Instruction using or defining the current register. 1012/// @param Ops Operand indices from readsWritesVirtualRegister(). 1013/// @param LoadMI Load instruction to use instead of stack slot when non-null. 1014/// @return True on success, and MI will be erased. 1015bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI, 1016 const SmallVectorImpl<unsigned> &Ops, 1017 MachineInstr *LoadMI) { 1018 bool WasCopy = MI->isCopy(); 1019 unsigned ImpReg = 0; 1020 1021 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied 1022 // operands. 1023 SmallVector<unsigned, 8> FoldOps; 1024 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1025 unsigned Idx = Ops[i]; 1026 MachineOperand &MO = MI->getOperand(Idx); 1027 if (MO.isImplicit()) { 1028 ImpReg = MO.getReg(); 1029 continue; 1030 } 1031 // FIXME: Teach targets to deal with subregs. 1032 if (MO.getSubReg()) 1033 return false; 1034 // We cannot fold a load instruction into a def. 1035 if (LoadMI && MO.isDef()) 1036 return false; 1037 // Tied use operands should not be passed to foldMemoryOperand. 1038 if (!MI->isRegTiedToDefOperand(Idx)) 1039 FoldOps.push_back(Idx); 1040 } 1041 1042 MachineInstr *FoldMI = 1043 LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI) 1044 : TII.foldMemoryOperand(MI, FoldOps, StackSlot); 1045 if (!FoldMI) 1046 return false; 1047 LIS.ReplaceMachineInstrInMaps(MI, FoldMI); 1048 MI->eraseFromParent(); 1049 1050 // TII.foldMemoryOperand may have left some implicit operands on the 1051 // instruction. Strip them. 1052 if (ImpReg) 1053 for (unsigned i = FoldMI->getNumOperands(); i; --i) { 1054 MachineOperand &MO = FoldMI->getOperand(i - 1); 1055 if (!MO.isReg() || !MO.isImplicit()) 1056 break; 1057 if (MO.getReg() == ImpReg) 1058 FoldMI->RemoveOperand(i - 1); 1059 } 1060 1061 DEBUG(dbgs() << "\tfolded: " << LIS.getInstructionIndex(FoldMI) << '\t' 1062 << *FoldMI); 1063 if (!WasCopy) 1064 ++NumFolded; 1065 else if (Ops.front() == 0) 1066 ++NumSpills; 1067 else 1068 ++NumReloads; 1069 return true; 1070} 1071 1072/// insertReload - Insert a reload of NewLI.reg before MI. 1073void InlineSpiller::insertReload(LiveInterval &NewLI, 1074 SlotIndex Idx, 1075 MachineBasicBlock::iterator MI) { 1076 MachineBasicBlock &MBB = *MI->getParent(); 1077 TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot, 1078 MRI.getRegClass(NewLI.reg), &TRI); 1079 --MI; // Point to load instruction. 1080 SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex(); 1081 DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI); 1082 VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, 1083 LIS.getVNInfoAllocator()); 1084 NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI)); 1085 ++NumReloads; 1086} 1087 1088/// insertSpill - Insert a spill of NewLI.reg after MI. 1089void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI, 1090 SlotIndex Idx, MachineBasicBlock::iterator MI) { 1091 MachineBasicBlock &MBB = *MI->getParent(); 1092 TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot, 1093 MRI.getRegClass(NewLI.reg), &TRI); 1094 --MI; // Point to store instruction. 1095 SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex(); 1096 DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI); 1097 VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator()); 1098 NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI)); 1099 ++NumSpills; 1100} 1101 1102/// spillAroundUses - insert spill code around each use of Reg. 1103void InlineSpiller::spillAroundUses(unsigned Reg) { 1104 DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n'); 1105 LiveInterval &OldLI = LIS.getInterval(Reg); 1106 1107 // Iterate over instructions using Reg. 1108 for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg); 1109 MachineInstr *MI = RI.skipInstruction();) { 1110 1111 // Debug values are not allowed to affect codegen. 1112 if (MI->isDebugValue()) { 1113 // Modify DBG_VALUE now that the value is in a spill slot. 1114 uint64_t Offset = MI->getOperand(1).getImm(); 1115 const MDNode *MDPtr = MI->getOperand(2).getMetadata(); 1116 DebugLoc DL = MI->getDebugLoc(); 1117 if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot, 1118 Offset, MDPtr, DL)) { 1119 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI); 1120 MachineBasicBlock *MBB = MI->getParent(); 1121 MBB->insert(MBB->erase(MI), NewDV); 1122 } else { 1123 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI); 1124 MI->eraseFromParent(); 1125 } 1126 continue; 1127 } 1128 1129 // Ignore copies to/from snippets. We'll delete them. 1130 if (SnippetCopies.count(MI)) 1131 continue; 1132 1133 // Stack slot accesses may coalesce away. 1134 if (coalesceStackAccess(MI, Reg)) 1135 continue; 1136 1137 // Analyze instruction. 1138 bool Reads, Writes; 1139 SmallVector<unsigned, 8> Ops; 1140 tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops); 1141 1142 // Find the slot index where this instruction reads and writes OldLI. 1143 // This is usually the def slot, except for tied early clobbers. 1144 SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex(); 1145 if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex())) 1146 if (SlotIndex::isSameInstr(Idx, VNI->def)) 1147 Idx = VNI->def; 1148 1149 // Check for a sibling copy. 1150 unsigned SibReg = isFullCopyOf(MI, Reg); 1151 if (SibReg && isSibling(SibReg)) { 1152 // This may actually be a copy between snippets. 1153 if (isRegToSpill(SibReg)) { 1154 DEBUG(dbgs() << "Found new snippet copy: " << *MI); 1155 SnippetCopies.insert(MI); 1156 continue; 1157 } 1158 if (Writes) { 1159 // Hoist the spill of a sib-reg copy. 1160 if (hoistSpill(OldLI, MI)) { 1161 // This COPY is now dead, the value is already in the stack slot. 1162 MI->getOperand(0).setIsDead(); 1163 DeadDefs.push_back(MI); 1164 continue; 1165 } 1166 } else { 1167 // This is a reload for a sib-reg copy. Drop spills downstream. 1168 LiveInterval &SibLI = LIS.getInterval(SibReg); 1169 eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx)); 1170 // The COPY will fold to a reload below. 1171 } 1172 } 1173 1174 // Attempt to fold memory ops. 1175 if (foldMemoryOperand(MI, Ops)) 1176 continue; 1177 1178 // Allocate interval around instruction. 1179 // FIXME: Infer regclass from instruction alone. 1180 LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM); 1181 NewLI.markNotSpillable(); 1182 1183 if (Reads) 1184 insertReload(NewLI, Idx, MI); 1185 1186 // Rewrite instruction operands. 1187 bool hasLiveDef = false; 1188 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1189 MachineOperand &MO = MI->getOperand(Ops[i]); 1190 MO.setReg(NewLI.reg); 1191 if (MO.isUse()) { 1192 if (!MI->isRegTiedToDefOperand(Ops[i])) 1193 MO.setIsKill(); 1194 } else { 1195 if (!MO.isDead()) 1196 hasLiveDef = true; 1197 } 1198 } 1199 DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI); 1200 1201 // FIXME: Use a second vreg if instruction has no tied ops. 1202 if (Writes) { 1203 if (hasLiveDef) 1204 insertSpill(NewLI, OldLI, Idx, MI); 1205 else { 1206 // This instruction defines a dead value. We don't need to spill it, 1207 // but do create a live range for the dead value. 1208 VNInfo *VNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator()); 1209 NewLI.addRange(LiveRange(Idx, Idx.getNextSlot(), VNI)); 1210 } 1211 } 1212 1213 DEBUG(dbgs() << "\tinterval: " << NewLI << '\n'); 1214 } 1215} 1216 1217/// spillAll - Spill all registers remaining after rematerialization. 1218void InlineSpiller::spillAll() { 1219 // Update LiveStacks now that we are committed to spilling. 1220 if (StackSlot == VirtRegMap::NO_STACK_SLOT) { 1221 StackSlot = VRM.assignVirt2StackSlot(Original); 1222 StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original)); 1223 StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator()); 1224 } else 1225 StackInt = &LSS.getInterval(StackSlot); 1226 1227 if (Original != Edit->getReg()) 1228 VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot); 1229 1230 assert(StackInt->getNumValNums() == 1 && "Bad stack interval values"); 1231 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) 1232 StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]), 1233 StackInt->getValNumInfo(0)); 1234 DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n'); 1235 1236 // Spill around uses of all RegsToSpill. 1237 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) 1238 spillAroundUses(RegsToSpill[i]); 1239 1240 // Hoisted spills may cause dead code. 1241 if (!DeadDefs.empty()) { 1242 DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n"); 1243 Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII); 1244 } 1245 1246 // Finally delete the SnippetCopies. 1247 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { 1248 for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]); 1249 MachineInstr *MI = RI.skipInstruction();) { 1250 assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy"); 1251 // FIXME: Do this with a LiveRangeEdit callback. 1252 LIS.RemoveMachineInstrFromMaps(MI); 1253 MI->eraseFromParent(); 1254 } 1255 } 1256 1257 // Delete all spilled registers. 1258 for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) 1259 Edit->eraseVirtReg(RegsToSpill[i], LIS); 1260} 1261 1262void InlineSpiller::spill(LiveRangeEdit &edit) { 1263 ++NumSpilledRanges; 1264 Edit = &edit; 1265 assert(!TargetRegisterInfo::isStackSlot(edit.getReg()) 1266 && "Trying to spill a stack slot."); 1267 // Share a stack slot among all descendants of Original. 1268 Original = VRM.getOriginal(edit.getReg()); 1269 StackSlot = VRM.getStackSlot(Original); 1270 StackInt = 0; 1271 1272 DEBUG(dbgs() << "Inline spilling " 1273 << MRI.getRegClass(edit.getReg())->getName() 1274 << ':' << edit.getParent() << "\nFrom original " 1275 << LIS.getInterval(Original) << '\n'); 1276 assert(edit.getParent().isSpillable() && 1277 "Attempting to spill already spilled value."); 1278 assert(DeadDefs.empty() && "Previous spill didn't remove dead defs"); 1279 1280 collectRegsToSpill(); 1281 analyzeSiblingValues(); 1282 reMaterializeAll(); 1283 1284 // Remat may handle everything. 1285 if (!RegsToSpill.empty()) 1286 spillAll(); 1287 1288 Edit->calculateRegClassAndHint(MF, LIS, Loops); 1289} 1290