InlineSpiller.cpp revision 7792e980c43536814ea42448db9799b4da32fef6
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
/// InlineSpiller - Spiller implementation that inserts spill and restore code
/// directly into the machine function, instead of recording the decisions in
/// VirtRegMap for later materialization.
class InlineSpiller : public Spiller {
  // Context and analyses, captured once at construction time.
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  LiveStacks &lss_;
  AliasAnalysis *aa_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *edit_;               // Current live range edit being spilled.
  const TargetRegisterClass *rc_;     // Register class of the spilled register.
  int stackSlot_;                     // Frame index of the shared spill slot.

  // All registers to spill to stackSlot_, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point. Their defining instructions
  // still have uses and must not be deleted by reMaterializeAll().
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private so instances can only be destroyed through the Spiller interface.
  ~InlineSpiller() {}

public:
  /// Capture references to the function being compiled and the analyses the
  /// spiller needs; no spilling happens until spill() is called.
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      lss_(pass.getAnalysis<LiveStacks>()),
      aa_(&pass.getAnalysis<AliasAnalysis>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  // Spiller interface: spill all registers in the live range edit.
  void spill(LiveRangeEdit &);

private:
  // Snippet identification and collection (see the Snippets section below).
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  // Rematerialization: recompute values before their uses instead of
  // reloading them from the stack slot.
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  // Removal of redundant stack accesses and memory-operand folding.
  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);

  // Insertion of the actual reload/spill instructions around a use.
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
};
}

namespace llvm {
/// createInlineSpiller - Factory for the sole way of constructing an
/// InlineSpiller (its destructor is private).
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

//===----------------------------------------------------------------------===//
//                                Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.
119 120/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register, 121/// otherwise return 0. 122static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) { 123 if (!MI->isCopy()) 124 return 0; 125 if (MI->getOperand(0).getSubReg() != 0) 126 return 0; 127 if (MI->getOperand(1).getSubReg() != 0) 128 return 0; 129 if (MI->getOperand(0).getReg() == Reg) 130 return MI->getOperand(1).getReg(); 131 if (MI->getOperand(1).getReg() == Reg) 132 return MI->getOperand(0).getReg(); 133 return 0; 134} 135 136/// isSnippet - Identify if a live interval is a snippet that should be spilled. 137/// It is assumed that SnipLI is a virtual register with the same original as 138/// edit_->getReg(). 139bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) { 140 unsigned Reg = edit_->getReg(); 141 142 // A snippet is a tiny live range with only a single instruction using it 143 // besides copies to/from Reg or spills/fills. We accept: 144 // 145 // %snip = COPY %Reg / FILL fi# 146 // %snip = USE %snip 147 // %Reg = COPY %snip / SPILL %snip, fi# 148 // 149 if (SnipLI.getNumValNums() > 2 || !lis_.intervalIsInOneMBB(SnipLI)) 150 return false; 151 152 MachineInstr *UseMI = 0; 153 154 // Check that all uses satisfy our criteria. 155 for (MachineRegisterInfo::reg_nodbg_iterator 156 RI = mri_.reg_nodbg_begin(SnipLI.reg); 157 MachineInstr *MI = RI.skipInstruction();) { 158 159 // Allow copies to/from Reg. 160 if (isFullCopyOf(MI, Reg)) 161 continue; 162 163 // Allow stack slot loads. 164 int FI; 165 if (SnipLI.reg == tii_.isLoadFromStackSlot(MI, FI) && FI == stackSlot_) 166 continue; 167 168 // Allow stack slot stores. 169 if (SnipLI.reg == tii_.isStoreToStackSlot(MI, FI) && FI == stackSlot_) 170 continue; 171 172 // Allow a single additional instruction. 173 if (UseMI && MI != UseMI) 174 return false; 175 UseMI = MI; 176 } 177 return true; 178} 179 180/// collectRegsToSpill - Collect live range snippets that only have a single 181/// real use. 
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = edit_->getReg();
  unsigned Orig = vrm_.getOriginal(Reg);

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Orig == Reg)
    return;

  // skipInstruction() advances past MI before returning it, so the loop is
  // safe even though snippet analysis may inspect MI's operands.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!SnipReg)
      continue;
    if (!TargetRegisterInfo::isVirtualRegister(SnipReg))
      continue;
    // Only consider registers split from the same original as Reg.
    if (vrm_.getOriginal(SnipReg) != Orig)
      continue;
    LiveInterval &SnipLI = lis_.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    // A snippet may be copied to/from Reg more than once; record it only once.
    if (std::find(RegsToSpill.begin(), RegsToSpill.end(),
                  SnipReg) == RegsToSpill.end())
      RegsToSpill.push_back(SnipReg);

    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
  }
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
/// Returns true if MI no longer needs a reload (remat succeeded, the load was
/// folded, or the use turned out to be <undef>).
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  // The parent interval has no value here, so the use reads an undefined
  // value. Flag the operands as <undef> instead of spilling anything.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  // FIXME: Properly remat for snippets as well.
  if (SnippetCopies.count(MI)) {
    usedValues_.insert(OrigVNI);
    return false;
  }

  LiveRangeEdit::Remat RM(OrigVNI);
  if (!edit_->canRematerializeAt(RM, UseIdx, false, lis_)) {
    // Remember that this value still has a real use; its def must survive.
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    edit_->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Rematting for a copy: Set allocation hint to be the destination register.
  if (MI->isCopy())
    mri_.setRegAllocationHint(NewLI.reg, 0, MI->getOperand(0).getReg());

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *lis_.getInstructionFromIndex(DefIdx));

  // Replace the operands of MI that read edit_->getReg() with the freshly
  // rematerialized register; it dies at MI.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  // The new interval covers exactly the remat def up to the use at MI.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, aa_))
    return;

  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted: their defs are dead
  // unless usedValues_ recorded a remaining use, or the value crosses a PHI.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    // Mark the value as erased; its def index is no longer meaningful.
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// coalesceStackAccess - If MI is a load or store of stackSlot_ using Reg,
/// the access is redundant after spilling and MI can simply be removed.
/// Returns true if MI was erased.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI = 0;
  unsigned InstrReg;
  if (!(InstrReg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // Fold either the given load or the stack slot itself into MI.
  MachineInstr *FoldMI =
                LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
                       : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  // Idx is the def slot of the using instruction; the reload live range ends
  // there.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  // NewLI is live from the reload's def until the use at MI.
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  // Get the defined value. It could be an early clobber so keep the def index.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  VNInfo *VNI = OldLI.getVNInfoAt(Idx);
  assert(VNI && VNI->def.getDefIndex() == Idx && "Inconsistent VNInfo");
  Idx = VNI->def;

  // Insert the store after MI, then step back so MI points at the store.
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // NewLI is live from the def at MI until the store that follows it.
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  LiveInterval &OldLI = lis_.getInterval(Reg);

  // Iterate over instructions using Reg. skipInstruction() has already moved
  // past MI, so MI may be erased or replaced inside the loop body.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        // The target could not describe the slot; drop the debug info.
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands to use the short-lived NewLI.reg, and
    // determine whether any def survives MI (and thus must be stored back).
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}

/// spill - Spiller interface entry point. Spill edit.getReg() and all of its
/// snippets to a stack slot shared by every register split from the same
/// original, rematerializing values where possible instead of reloading.
void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << PrintReg(vrm_.getOriginal(edit.getReg())) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  // Share a stack slot among all descendants of Orig.
  unsigned Orig = vrm_.getOriginal(edit.getReg());
  stackSlot_ = vrm_.getStackSlot(Orig);

  collectRegsToSpill();

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());

  // Allocate the shared stack slot lazily, only once spilling is certain.
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(Orig);

  if (Orig != edit.getReg())
    vrm_.assignVirt2StackSlot(edit.getReg(), stackSlot_);

  // Update LiveStacks now that we are committed to spilling.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  if (!stacklvr.hasAtLeastOneValue())
    stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  // The slot interval covers the ranges of every register spilled into it,
  // merged under a single value number.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    stacklvr.MergeRangesInAsValue(lis_.getInterval(RegsToSpill[i]),
                                  stacklvr.getValNumInfo(0));

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Finally delete the SnippetCopies. spillAroundUses() left them as the only
  // remaining users of the spilled register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
    // FIXME: Do this with a LiveRangeEdit callback.
    vrm_.RemoveMachineInstrFromMaps(MI);
    lis_.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }

  // The spilled registers have no uses left; retire their intervals.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    edit.eraseVirtReg(RegsToSpill[i], lis_);
}