InlineSpiller.cpp revision c1655e1a3c3a566b91b0513b56d61b58da1e36ba
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  const TargetRegisterClass *RC;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(false), SpillReg(Reg), SpillVNI(VNI), DefMI(0) {}
  };

  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}

  void spill(LiveRangeEdit &);

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  void traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(unsigned Reg, VNInfo *VNI);

  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isCopy())
    return 0;
  if (MI->getOperand(0).getSubReg() != 0)
    return 0;
  if (MI->getOperand(1).getSubReg() != 0)
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (!isRegToSpill(SnipReg))
      RegsToSpill.push_back(SnipReg);

    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
  }
}


//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Find a defining instruction that may be a candidate for rematerialization.
///
void InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                      VNInfo *OrigVNI) {
  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');
  SmallPtrSet<VNInfo*, 8> Visited;
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  // Best spill candidate seen so far. This must dominate UseVNI.
  SibValueInfo SVI(UseReg, UseVNI);
  MachineBasicBlock *UseMBB = LIS.getMBBFromIndex(UseVNI->def);
  unsigned SpillDepth = Loops.getLoopDepth(UseMBB);
  bool SeenOrigPHI = false; // Original PHI met.

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    if (!Visited.insert(VNI))
      continue;

    // Is this value a better spill candidate?
    if (!isRegToSpill(Reg)) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      if (MBB != UseMBB && MDT.dominates(MBB, UseMBB)) {
        // This is a valid spill location dominating UseVNI.
        // Prefer to spill at a smaller loop depth.
        unsigned Depth = Loops.getLoopDepth(MBB);
        if (Depth < SpillDepth) {
          DEBUG(dbgs() << " spill depth " << Depth << ": " << PrintReg(Reg)
                       << ':' << VNI->id << '@' << VNI->def << '\n');
          SVI.SpillReg = Reg;
          SVI.SpillVNI = VNI;
          SpillDepth = Depth;
        }
      }
    }

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << " orig phi value " << PrintReg(Reg) << ':'
                     << VNI->id << '@' << VNI->def << '\n');
        SeenOrigPHI = true;
        continue;
      }
      // Get values live-out of predecessors.
      LiveInterval &LI = LIS.getInterval(Reg);
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
             PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI.getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(Reg, PVNI));
      }
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        VNInfo *SrcVNI = SrcLI.getVNInfoAt(VNI->def.getUseIndex());
        assert(SrcVNI && "Copy from non-existing value");
        DEBUG(dbgs() << " copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def << '\n');
        WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        continue;
      }
    }

    // Track reachable reloads.
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << " reload " << PrintReg(Reg) << ':'
                   << VNI->id << "@" << VNI->def << '\n');
      SVI.AllDefsAreReloads = true;
      continue;
    }

    // We have an 'original' def. Don't record trivial cases.
    if (VNI == UseVNI) {
      DEBUG(dbgs() << "Not a sibling copy.\n");
      return;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << " def " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\t' << *MI);
    SVI.DefMI = MI;
  } while (!WorkList.empty());

  if (SeenOrigPHI || SVI.DefMI)
    SVI.AllDefsAreReloads = false;

  DEBUG({
    if (SVI.AllDefsAreReloads)
      dbgs() << "All defs are reloads.\n";
    else
      dbgs() << "Prefer to spill " << PrintReg(SVI.SpillReg) << ':'
             << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def << '\n';
  });
  SibValues.insert(std::make_pair(UseVNI, SVI));
}

/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
void InlineSpiller::analyzeSiblingValues() {
  SibValues.clear();

  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      if (VNI->isUnused() || !(VNI->isPHIDef() || VNI->getCopy()))
        continue;
      VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
      if (OrigVNI->def != VNI->def)
        traceSiblingValue(Reg, VNI, OrigVNI);
    }
  }
}

/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::const_iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  LiveInterval &StackInt = LSS.getInterval(StackSlot);
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt.MergeValueInAsValue(OrigLI, OrigVNI, StackInt.getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads)
    return true;

  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SVI.SpillReg, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MII = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot, RC, &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);
  return true;
}

/// eliminateRedundantSpills - Reg:VNI is known to be on the stack. Remove any
/// redundant spills of this value in Reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(unsigned Reg, VNInfo *VNI) {
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(Reg, VNI));
  LiveInterval &StackInt = LSS.getInterval(StackSlot);

  do {
    tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << "Checking redundant spills for " << PrintReg(Reg) << ':'
                 << VNI->id << '@' << VNI->def << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    LiveInterval &LI = LIS.getInterval(Reg);
    StackInt.MergeValueInAsValue(LI, VNI, StackInt.getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI.getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(DstReg, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
      }
    }
  } while (!WorkList.empty());
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = Edit->getParent().getVNInfoAt(UseIdx);

  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == Edit->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  // FIXME: Properly remat for snippets as well.
  if (SnippetCopies.count(MI)) {
    UsedValues.insert(OrigVNI);
    return false;
  }

  LiveRangeEdit::Remat RM(OrigVNI);
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    UsedValues.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes Edit->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(Edit->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        UsedValues.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->create(LIS, VRM);
  NewLI.markNotSpillable();

  // Rematting for a copy: Set allocation hint to be the destination register.
  if (MI->isCopy())
    MRI.setRegAllocationHint(NewLI.reg, 0, MI->getOperand(0).getReg());

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == Edit->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of Edit->getReg().
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = MRI.use_nodbg_begin(Edit->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = Edit->getParent().vni_begin(),
       E = Edit->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || !Edit->didRematerialize(VNI) ||
        UsedValues.count(VNI))
      continue;
    MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    LIS.RemoveMachineInstrFromMaps(DefMI);
    VRM.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = MRI.use_begin(Edit->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !LIS.isNotInMIMap(NextMI)) {
      SlotIndex Idx = LIS.getInstructionIndex(NextMI);
      VNInfo *VNI = Edit->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || UsedValues.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg;
  if (!(InstrReg = TII.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = TII.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI Instruction using or defining the current register.
/// @param Ops Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
    LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
           : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot, RC, &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  // Get the defined value. It could be an early clobber so keep the def index.
  SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
  VNInfo *VNI = OldLI.getVNInfoAt(Idx);
  assert(VNI && VNI->def.getDefIndex() == Idx && "Inconsistent VNInfo");
  Idx = VNI->def;

  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot, RC, &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SibReg))
      SibReg = 0;

    // Hoist the spill of a sib-reg copy.
    if (SibReg && Writes && !Reads && hoistSpill(OldLI, MI)) {
      // This COPY is now dead, the value is already in the stack slot.
      MI->getOperand(0).setIsDead();
      DeadDefs.push_back(MI);
      continue;
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->create(LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");

  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (Edit->getParent().empty())
    return;

  RC = MRI.getRegClass(edit.getReg());

  if (StackSlot == VirtRegMap::NO_STACK_SLOT)
    StackSlot = VRM.assignVirt2StackSlot(Original);

  if (Original != edit.getReg())
    VRM.assignVirt2StackSlot(edit.getReg(), StackSlot);

  // Update LiveStacks now that we are committed to spilling.
  LiveInterval &stacklvr = LSS.getOrCreateInterval(StackSlot, RC);
  if (!stacklvr.hasAtLeastOneValue())
    stacklvr.getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    stacklvr.MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                  stacklvr.getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << stacklvr << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
    // FIXME: Do this with a LiveRangeEdit callback.
    VRM.RemoveMachineInstrFromMaps(MI);
    LIS.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }

  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    edit.eraseVirtReg(RegsToSpill[i], LIS);
}